repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.check_keyname
|
python
|
def check_keyname(self, rule):
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
|
If a key name is specified, verify it is permitted.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L164-L179
|
[
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.check_client_ip
|
python
|
def check_client_ip(self, rule):
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
|
If a client IP is specified, verify it is permitted.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L181-L198
|
[
"def get_client_ip(self):\n \"\"\"Return the client IP from the environment.\"\"\"\n\n if self.client_ip:\n return self.client_ip\n\n try:\n client = os.environ.get('SSH_CONNECTION',\n os.environ.get('SSH_CLIENT'))\n self.client_ip = client.split()[0]\n self.logdebug('client_ip: %s\\n' % self.client_ip)\n return self.client_ip\n except:\n raise SSHEnvironmentError('cannot identify the ssh client '\n 'IP address')\n",
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.get_merged_config
|
python
|
def get_merged_config(self):
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
|
Get merged config file.
Returns an open StringIO containing the
merged config file.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L200-L232
|
[
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.load
|
python
|
def load(self):
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
|
Load our config, log and raise on error.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L234-L246
|
[
"def pretty(thing):\n \"\"\"Return pretty-printable version.\"\"\"\n ppthing = pprint.PrettyPrinter(indent=4)\n return ppthing.pformat(thing)\n",
"def raise_and_log_error(self, error, message):\n \"\"\"Raise error, including message and original traceback.\n\n error: the error to raise\n message: the user-facing error message\n \"\"\"\n self.log('raising %s, traceback %s\\n' %\n (error, traceback.format_exc()))\n raise error(message)\n",
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n",
"def get_merged_config(self):\n \"\"\"Get merged config file.\n\n Returns an open StringIO containing the\n merged config file.\n \"\"\"\n if self.yamldocs:\n return\n\n loadfiles = []\n if self.configfile:\n loadfiles.append(self.configfile)\n\n if self.configdir:\n # Gets list of all non-dotfile files from configdir.\n loadfiles.extend(\n [f for f in\n [os.path.join(self.configdir, x) for x in\n os.listdir(self.configdir)]\n if os.path.isfile(f) and\n not os.path.basename(f).startswith('.')])\n\n merged_configfile = io.StringIO()\n merged_configfile.write('-\\n')\n for thefile in loadfiles:\n self.logdebug('reading in config file %s\\n' % thefile)\n merged_configfile.write(open(thefile).read())\n merged_configfile.write('\\n-\\n')\n merged_configfile.seek(0)\n self.logdebug('merged log file: \"\"\"\\n%s\\n\"\"\"\\n' %\n merged_configfile.read())\n merged_configfile.seek(0)\n return merged_configfile\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.dump_config
|
python
|
def dump_config(self):
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
|
Pretty print the configuration dict to stdout.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L248-L258
|
[
"def pretty(thing):\n \"\"\"Return pretty-printable version.\"\"\"\n ppthing = pprint.PrettyPrinter(indent=4)\n return ppthing.pformat(thing)\n",
"def get_merged_config(self):\n \"\"\"Get merged config file.\n\n Returns an open StringIO containing the\n merged config file.\n \"\"\"\n if self.yamldocs:\n return\n\n loadfiles = []\n if self.configfile:\n loadfiles.append(self.configfile)\n\n if self.configdir:\n # Gets list of all non-dotfile files from configdir.\n loadfiles.extend(\n [f for f in\n [os.path.join(self.configdir, x) for x in\n os.listdir(self.configdir)]\n if os.path.isfile(f) and\n not os.path.basename(f).startswith('.')])\n\n merged_configfile = io.StringIO()\n merged_configfile.write('-\\n')\n for thefile in loadfiles:\n self.logdebug('reading in config file %s\\n' % thefile)\n merged_configfile.write(open(thefile).read())\n merged_configfile.write('\\n-\\n')\n merged_configfile.seek(0)\n self.logdebug('merged log file: \"\"\"\\n%s\\n\"\"\"\\n' %\n merged_configfile.read())\n merged_configfile.seek(0)\n return merged_configfile\n",
"def load(self):\n \"\"\"Load our config, log and raise on error.\"\"\"\n try:\n merged_configfile = self.get_merged_config()\n self.yamldocs = yaml.load(merged_configfile, Loader=Loader)\n\n # Strip out the top level 'None's we get from concatenation.\n # Functionally not required, but makes dumps cleaner.\n self.yamldocs = [x for x in self.yamldocs if x]\n self.logdebug('parsed_rules:\\n%s\\n' % pretty(self.yamldocs))\n\n except (yaml.scanner.ScannerError, yaml.parser.ParserError):\n self.raise_and_log_error(ConfigError, 'error parsing config.')\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.install_key_data
|
python
|
def install_key_data(self, keydata, target):
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
|
Install the key data into the open file.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L260-L278
| null |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.install_key
|
python
|
def install_key(self, keyfile, authorized_keys):
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
|
Install a key into the authorized_keys file.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L280-L294
|
[
"def install_key_data(self, keydata, target):\n \"\"\"Install the key data into the open file.\"\"\"\n\n target.seek(0)\n contents = target.read()\n ssh_opts = 'no-port-forwarding'\n if keydata in contents:\n raise InstallError('key data already in file - refusing '\n 'to double-install.\\n')\n command = '%s --run' % self.authprogs_binary\n if self.logfile:\n command += ' --logfile=%s' % self.logfile\n if self.keyname:\n command += ' --keyname=%s' % self.keyname\n\n target.write('command=\"%(command)s\",%(ssh_opts)s %(keydata)s\\n' %\n {'command': command,\n 'keydata': keydata,\n 'ssh_opts': ssh_opts})\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.find_match_scp
|
python
|
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
|
Handle scp commands.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L296-L342
|
[
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n",
"def log(self, message):\n \"\"\"Log information.\"\"\"\n if self.logfh:\n self.logfh.write(message) # pylint: disable-msg=E1103\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.find_match_command
|
python
|
def find_match_command(self, rule):
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
|
Return a matching (possibly munged) command, if found in rule.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L344-L368
|
[
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.find_match
|
python
|
def find_match(self):
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
|
Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L370-L413
|
[
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n",
"def check_client_ip(self, rule):\n \"\"\"If a client IP is specified, verify it is permitted.\"\"\"\n\n if not rule.get('from'):\n self.logdebug('no \"from\" requirement.\\n')\n return True\n\n allow_from = rule.get('from')\n if not isinstance(allow_from, list):\n allow_from = [allow_from]\n client_ip = self.get_client_ip()\n\n if client_ip in allow_from:\n self.logdebug('client_ip %s in %s\\n' % (client_ip, allow_from))\n return True\n else:\n self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))\n return False\n",
"def load(self):\n \"\"\"Load our config, log and raise on error.\"\"\"\n try:\n merged_configfile = self.get_merged_config()\n self.yamldocs = yaml.load(merged_configfile, Loader=Loader)\n\n # Strip out the top level 'None's we get from concatenation.\n # Functionally not required, but makes dumps cleaner.\n self.yamldocs = [x for x in self.yamldocs if x]\n self.logdebug('parsed_rules:\\n%s\\n' % pretty(self.yamldocs))\n\n except (yaml.scanner.ScannerError, yaml.parser.ParserError):\n self.raise_and_log_error(ConfigError, 'error parsing config.')\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.exec_command
|
python
|
def exec_command(self):
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L415-L446
|
[
"def get_client_ip(self):\n \"\"\"Return the client IP from the environment.\"\"\"\n\n if self.client_ip:\n return self.client_ip\n\n try:\n client = os.environ.get('SSH_CONNECTION',\n os.environ.get('SSH_CLIENT'))\n self.client_ip = client.split()[0]\n self.logdebug('client_ip: %s\\n' % self.client_ip)\n return self.client_ip\n except:\n raise SSHEnvironmentError('cannot identify the ssh client '\n 'IP address')\n",
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n",
"def log(self, message):\n \"\"\"Log information.\"\"\"\n if self.logfh:\n self.logfh.write(message) # pylint: disable-msg=E1103\n",
"def find_match(self):\n \"\"\"Load the config and find a matching rule.\n\n returns the results of find_match_command, a dict of\n the command and (in the future) other metadata.\n \"\"\"\n\n self.load()\n for yamldoc in self.yamldocs:\n self.logdebug('\\nchecking rule \"\"\"%s\"\"\"\\n' % yamldoc)\n\n if not yamldoc:\n continue\n\n if not self.check_client_ip(yamldoc):\n # Rejected - Client IP does not match\n continue\n\n if not self.check_keyname(yamldoc):\n # Rejected - keyname does not match\n continue\n\n rules = yamldoc.get('allow')\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n rule_type = rule.get('rule_type', 'command')\n if rule_type == 'command':\n sub = self.find_match_command\n elif rule_type == 'scp':\n sub = self.find_match_scp\n else:\n self.log('fatal: no such rule_type \"%s\"\\n' % rule_type)\n self.raise_and_log_error(ConfigError,\n 'error parsing config.')\n\n match = sub(rule)\n if match:\n return match\n\n # No matches, time to give up.\n raise CommandRejected('command \"%s\" denied.' %\n self.original_command_string)\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
|
daethnir/authprogs
|
setup.py
|
runcmd
|
python
|
def runcmd(command, command_input=None, cwd=None):
proc = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd)
(stdout, stderr) = proc.communicate(command_input)
if proc.returncode != 0:
sys.stderr.write('ABORTING: command "%s" failed w/ code %s:\n'
'%s\n%s' % (command, proc.returncode,
stdout, stderr))
sys.exit(proc.returncode)
return proc.returncode, stdout, stderr
|
Run a command, potentially sending stdin, and capturing stdout/err.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L33-L45
| null |
#!/usr/bin/env python
"""Authprogs setup.py"""
# pylint: disable-msg=W0511
# pylint: disable-msg=R0904
import authprogs
import os
import shutil
import subprocess
import sys
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.sdist import sdist
# Documents that should be converted or renamed from markdown
MARKDOWN2HTML = ['authprogs']
MARKDOWN2TEXT = ['AUTHORS', 'INSTALL', 'README', 'TODO']
if sys.version_info[0] == 2:
console_script = 'authprogs'
else:
console_script = 'authprogs%d' % sys.version_info.major
def long_description():
"""Read our long description from the fs."""
with open('doc/description.rst') as filed:
return filed.read()
class Converter(object):
"""Documentation conversion class."""
def __init__(self):
"""Init."""
self.created = []
def dd_docs(self):
"""Copy and convert various documentation files."""
top = os.path.join(os.path.dirname(__file__))
doc = os.path.join(top, 'doc')
# Markdown to ronn to man page
man_md = os.path.join(doc, 'authprogs.md')
man_ronn = os.path.join(doc, 'authprogs.1.ronn')
man_1 = os.path.join(doc, 'authprogs.1')
# Create manpage
try:
if not os.path.exists(man_1):
shutil.copy(man_md, man_ronn)
self.created.append(man_ronn)
retval = subprocess.call(['ronn', '-r', man_ronn])
if retval != 0:
raise Exception('ronn man page conversion failed, '
'returned %s' % retval)
self.created.append(man_1)
except:
raise Exception('ronn required for manpage conversion - do you '
'have it installed?')
# Markdown files in docs dir get converted to .html
for name in MARKDOWN2HTML:
htmlfile = os.path.join(doc, '%s.html' % name)
if os.path.exists(htmlfile):
continue
target = open(htmlfile, 'w')
self.created.append(htmlfile)
stdout = runcmd(['python', '-m', 'markdown',
os.path.join(doc, '%s.md' % name)])[1]
if not stdout:
raise Exception('markdown conversion failed, no output.')
target.write(stdout)
target.close()
# Markdown files in top level just get renamed sans .md
for name in MARKDOWN2TEXT:
target = os.path.join(top, name)
if os.path.exists(target):
continue
source = os.path.join(top, '%s.md' % target)
shutil.copy(source, target)
self.created.append(target)
def rm_docs(self):
"""Remove converted docs."""
for filename in self.created:
if os.path.exists(filename):
os.unlink(filename)
class APInstall(install):
"""Create man pages and share/doc files from markdown/etc source."""
def run(self):
converter = Converter()
converter.dd_docs()
install.run(self)
converter.rm_docs()
class APSdist(sdist):
"""Convert markdown for sdist packaging."""
def run(self):
converter = Converter()
converter.dd_docs()
sdist.run(self)
converter.rm_docs()
setup(
name='authprogs',
version=authprogs.__version__,
description='SSH Command Authenticator',
long_description=long_description(),
keywords='authprogs ssh pubkey identity authoried_keys security',
url='http://github.com/daethnir/authprogs',
author='Bri Hatch',
author_email='bri@ifokr.org',
license='GPLv2',
maintainer='Bri Hatch',
maintainer_email='bri@ifokr.org',
packages=['authprogs'],
data_files=[
('share/man/man1/', ['doc/authprogs.1']),
('share/doc/authprogs/',
['AUTHORS', 'COPYING', 'INSTALL', 'README',
'TODO', 'doc/authprogs.html'])],
test_suite='authprogs.tests',
setup_requires=['markdown'],
install_requires=['pyyaml'],
zip_safe=False,
cmdclass={"install": APInstall, "sdist": APSdist},
entry_points={
'console_scripts': ['%s = authprogs.authprogs:main' % console_script]
},
)
|
daethnir/authprogs
|
setup.py
|
Converter.dd_docs
|
python
|
def dd_docs(self):
top = os.path.join(os.path.dirname(__file__))
doc = os.path.join(top, 'doc')
# Markdown to ronn to man page
man_md = os.path.join(doc, 'authprogs.md')
man_ronn = os.path.join(doc, 'authprogs.1.ronn')
man_1 = os.path.join(doc, 'authprogs.1')
# Create manpage
try:
if not os.path.exists(man_1):
shutil.copy(man_md, man_ronn)
self.created.append(man_ronn)
retval = subprocess.call(['ronn', '-r', man_ronn])
if retval != 0:
raise Exception('ronn man page conversion failed, '
'returned %s' % retval)
self.created.append(man_1)
except:
raise Exception('ronn required for manpage conversion - do you '
'have it installed?')
# Markdown files in docs dir get converted to .html
for name in MARKDOWN2HTML:
htmlfile = os.path.join(doc, '%s.html' % name)
if os.path.exists(htmlfile):
continue
target = open(htmlfile, 'w')
self.created.append(htmlfile)
stdout = runcmd(['python', '-m', 'markdown',
os.path.join(doc, '%s.md' % name)])[1]
if not stdout:
raise Exception('markdown conversion failed, no output.')
target.write(stdout)
target.close()
# Markdown files in top level just get renamed sans .md
for name in MARKDOWN2TEXT:
target = os.path.join(top, name)
if os.path.exists(target):
continue
source = os.path.join(top, '%s.md' % target)
shutil.copy(source, target)
self.created.append(target)
|
Copy and convert various documentation files.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L54-L100
|
[
"def runcmd(command, command_input=None, cwd=None):\n \"\"\"Run a command, potentially sending stdin, and capturing stdout/err.\"\"\"\n proc = subprocess.Popen(command, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=cwd)\n (stdout, stderr) = proc.communicate(command_input)\n if proc.returncode != 0:\n sys.stderr.write('ABORTING: command \"%s\" failed w/ code %s:\\n'\n '%s\\n%s' % (command, proc.returncode,\n stdout, stderr))\n sys.exit(proc.returncode)\n return proc.returncode, stdout, stderr\n"
] |
class Converter(object):
"""Documentation conversion class."""
def __init__(self):
"""Init."""
self.created = []
def rm_docs(self):
"""Remove converted docs."""
for filename in self.created:
if os.path.exists(filename):
os.unlink(filename)
|
daethnir/authprogs
|
setup.py
|
Converter.rm_docs
|
python
|
def rm_docs(self):
for filename in self.created:
if os.path.exists(filename):
os.unlink(filename)
|
Remove converted docs.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L102-L106
| null |
class Converter(object):
"""Documentation conversion class."""
def __init__(self):
"""Init."""
self.created = []
def dd_docs(self):
"""Copy and convert various documentation files."""
top = os.path.join(os.path.dirname(__file__))
doc = os.path.join(top, 'doc')
# Markdown to ronn to man page
man_md = os.path.join(doc, 'authprogs.md')
man_ronn = os.path.join(doc, 'authprogs.1.ronn')
man_1 = os.path.join(doc, 'authprogs.1')
# Create manpage
try:
if not os.path.exists(man_1):
shutil.copy(man_md, man_ronn)
self.created.append(man_ronn)
retval = subprocess.call(['ronn', '-r', man_ronn])
if retval != 0:
raise Exception('ronn man page conversion failed, '
'returned %s' % retval)
self.created.append(man_1)
except:
raise Exception('ronn required for manpage conversion - do you '
'have it installed?')
# Markdown files in docs dir get converted to .html
for name in MARKDOWN2HTML:
htmlfile = os.path.join(doc, '%s.html' % name)
if os.path.exists(htmlfile):
continue
target = open(htmlfile, 'w')
self.created.append(htmlfile)
stdout = runcmd(['python', '-m', 'markdown',
os.path.join(doc, '%s.md' % name)])[1]
if not stdout:
raise Exception('markdown conversion failed, no output.')
target.write(stdout)
target.close()
# Markdown files in top level just get renamed sans .md
for name in MARKDOWN2TEXT:
target = os.path.join(top, name)
if os.path.exists(target):
continue
source = os.path.join(top, '%s.md' % target)
shutil.copy(source, target)
self.created.append(target)
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.create_ngram_table
|
python
|
def create_ngram_table(self, cardinality):
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
|
Creates a table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L56-L79
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DababaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def remove_ngram(self, ngram):
"""
Removes a given ngram from the databae. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.create_index
|
python
|
def create_index(self, cardinality):
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
|
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L97-L110
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DababaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def create_ngram_table(self, cardinality):
"""
Creates a table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
"""
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def remove_ngram(self, ngram):
"""
Removes a given ngram from the databae. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.ngrams
|
python
|
def ngrams(self, with_counts=False):
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
|
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L150-L178
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DababaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def create_ngram_table(self, cardinality):
"""
Creates a table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
"""
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def remove_ngram(self, ngram):
"""
Removes a given ngram from the databae. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.ngram_count
|
python
|
def ngram_count(self, ngram):
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
|
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L185-L206
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def _build_where_clause(self, ngram):\n where_clause = \" WHERE\"\n for i in range(len(ngram)):\n n = re_escape_singlequote.sub(\"''\", ngram[i])\n if i < (len(ngram) - 1):\n where_clause += \" word_{0} = '{1}' AND\".format(\n len(ngram) - i - 1, n)\n else:\n where_clause += \" word = '{0}'\".format(n)\n return where_clause\n",
"def _extract_first_integer(self, table):\n count = 0\n if len(table) > 0:\n if len(table[0]) > 0:\n count = int(table[0][0])\n\n if not count > 0:\n count = 0\n return count\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DababaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def create_ngram_table(self, cardinality):
"""
Creates a table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
"""
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def remove_ngram(self, ngram):
"""
Removes a given ngram from the databae. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.insert_ngram
|
python
|
def insert_ngram(self, ngram, count):
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
|
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L225-L239
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def _build_values_clause(self, ngram, count):\n ngram_escaped = []\n for n in ngram:\n ngram_escaped.append(re_escape_singlequote.sub(\"''\", n))\n\n values_clause = \"VALUES('\"\n values_clause += \"', '\".join(ngram_escaped)\n values_clause += \"', {0})\".format(count)\n return values_clause\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DababaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def create_ngram_table(self, cardinality):
"""
Creates a table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
"""
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a give cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def remove_ngram(self, ngram):
"""
Removes a given ngram from the databae. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.update_ngram
|
python
|
def update_ngram(self, ngram, count):
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
|
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L241-L257
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def _build_where_clause(self, ngram):\n where_clause = \" WHERE\"\n for i in range(len(ngram)):\n n = re_escape_singlequote.sub(\"''\", ngram[i])\n if i < (len(ngram) - 1):\n where_clause += \" word_{0} = '{1}' AND\".format(\n len(ngram) - i - 1, n)\n else:\n where_clause += \" word = '{0}'\".format(n)\n return where_clause\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DatabaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def create_ngram_table(self, cardinality):
"""
Creates a table for n-gram of a given cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
"""
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a given cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
def remove_ngram(self, ngram):
"""
Removes a given ngram from the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
DatabaseConnector.remove_ngram
|
python
|
def remove_ngram(self, ngram):
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
|
Removes a given ngram from the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L259-L273
|
[
"def execute_sql(self):\n raise NotImplementedError(\"Method must be implemented\")\n",
"def _build_where_clause(self, ngram):\n where_clause = \" WHERE\"\n for i in range(len(ngram)):\n n = re_escape_singlequote.sub(\"''\", ngram[i])\n if i < (len(ngram) - 1):\n where_clause += \" word_{0} = '{1}' AND\".format(\n len(ngram) - i - 1, n)\n else:\n where_clause += \" word = '{0}'\".format(n)\n return where_clause\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open sqlite database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = c.fetchall()\n return result\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class DatabaseConnector(object):
"""
Base class for all database connectors.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, dbname, cardinality = 1):
"""
Constructor of the base class DatabaseConnector.
Parameters
----------
dbname : str
path to the database file or database name
cardinality : int
default cardinality for n-grams
"""
self.cardinality = cardinality
self.dbname = dbname
self.lowercase = False
self.normalize = False
def create_ngram_table(self, cardinality):
"""
Creates a table for n-gram of a given cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` created.
Parameters
----------
cardinality : int
The cardinality to create a table for.
"""
query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
unique = ""
for i in reversed(range(cardinality)):
if i != 0:
unique += "word_{0}, ".format(i)
query += "word_{0} TEXT, ".format(i)
else:
unique += "word"
query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
unique)
self.execute_sql(query)
def delete_ngram_table(self, cardinality):
"""
Deletes the table for n-gram of a given cardinality. The table name is
constructed from this parameter, for example for cardinality `2` there
will be a table `_2_gram` deleted.
Parameters
----------
cardinality : int
The cardinality of the table to delete.
"""
query = "DROP TABLE IF EXISTS _{0}_gram;".format(cardinality)
self.execute_sql(query)
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});".format(cardinality, i)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(
cardinality, i)
self.execute_sql(query)
def create_unigram_table(self):
"""
Creates a table for n-grams of cardinality 1.
"""
self.create_ngram_table(1)
def create_bigram_table(self):
"""
Creates a table for n-grams of cardinality 2.
"""
self.create_ngram_table(2)
def create_trigram_table(self):
"""
Creates a table for n-grams of cardinality 3.
"""
self.create_ngram_table(3)
def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
None
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row)
def unigram_counts_sum(self):
query = "SELECT SUM(count) from _1_gram;"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram.
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result)
def ngram_like_table(self, ngram, limit = -1):
query = "SELECT {0} FROM _{1}_gram {2} ORDER BY count DESC".format(
self._build_select_like_clause(len(ngram)), len(ngram),
self._build_where_like_clause(ngram))
if limit < 0:
query += ";"
else:
query += " LIMIT {0};".format(limit)
return self.execute_sql(query)
def ngram_like_table_filtered(self, ngram, filter, limit = -1):
pass
def increment_ngram_count(self, ngram):
pass
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query)
def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query)
def open_database(self):
raise NotImplementedError("Method must be implemented")
def close_database(self):
raise NotImplementedError("Method must be implemented")
def execute_sql(self):
raise NotImplementedError("Method must be implemented")
############################################### Private methods
def _build_values_clause(self, ngram, count):
ngram_escaped = []
for n in ngram:
ngram_escaped.append(re_escape_singlequote.sub("''", n))
values_clause = "VALUES('"
values_clause += "', '".join(ngram_escaped)
values_clause += "', {0})".format(count)
return values_clause
def _build_where_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
n = re_escape_singlequote.sub("''", ngram[i])
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, n)
else:
where_clause += " word = '{0}'".format(n)
return where_clause
def _build_select_like_clause(self, cardinality):
result = ""
for i in reversed(range(cardinality)):
if i != 0:
result += "word_{0}, ". format(i)
else:
result += "word, count"
return result
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
return where_clause
def _extract_first_integer(self, table):
count = 0
if len(table) > 0:
if len(table[0]) > 0:
count = int(table[0][0])
if not count > 0:
count = 0
return count
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
SqliteDatabaseConnector.execute_sql
|
python
|
def execute_sql(self, query):
c = self.con.cursor()
c.execute(query)
result = c.fetchall()
return result
|
Executes a given query string on an open sqlite database.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L381-L389
| null |
class SqliteDatabaseConnector(DatabaseConnector):
"""
Database connector for sqlite databases.
"""
def __init__(self, dbname, cardinality = 1):
"""
Constructor for the sqlite database connector.
Parameters
----------
dbname : str
path to the database file
cardinality : int
default cardinality for n-grams
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = None
self.open_database()
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def open_database(self):
"""
Opens the sqlite database.
"""
self.con = sqlite3.connect(self.dbname)
def close_database(self):
"""
Closes the sqlite database.
"""
if self.con:
self.con.close()
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector.create_database
|
python
|
def create_database(self):
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
|
Creates an empty database if not exists.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L428-L469
|
[
"def _database_exists(self):\n \"\"\"\n Check if the database exists.\n\n \"\"\"\n con = psycopg2.connect(host=self.host, database=\"postgres\",\n user=self.user, password=self.password, port=self.port)\n query_check = \"select datname from pg_catalog.pg_database\"\n query_check += \" where datname = '{0}';\".format(self.dbname)\n c = con.cursor()\n c.execute(query_check)\n result = c.fetchall()\n if len(result) > 0:\n return True\n return False\n"
] |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def reset_database(self):
"""
Re-create an empty database.
"""
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query)
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def open_database(self):
"""
Opens the postgres database.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
def close_database(self):
"""
Closes the postgres database.
"""
if self.con:
self.con.close()
self.con = None
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result
############################################### Private methods
def _database_exists(self):
"""
Check if the database exists.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector.reset_database
|
python
|
def reset_database(self):
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
|
Re-create an empty database.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L472-L486
|
[
"def create_database(self):\n \"\"\"\n Creates an empty database if not exists.\n\n \"\"\"\n if not self._database_exists():\n con = psycopg2.connect(host=self.host, database=\"postgres\",\n user=self.user, password=self.password, port=self.port)\n con.set_isolation_level(\n psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n query = \"CREATE DATABASE {0};\".format(self.dbname)\n c = con.cursor()\n c.execute(query) \n con.close()\n\n\n if self.normalize:\n self.open_database()\n query = \"CREATE EXTENSION IF NOT EXISTS \\\"plperlu\\\";\"\n self.execute_sql(query)\n# query = \"\"\"CREATE OR REPLACE FUNCTION normalize(str text)\n#RETURNS text\n#AS $$\n#import unicodedata\n#return ''.join(c for c in unicodedata.normalize('NFKD', str)\n#if unicodedata.category(c) != 'Mn')\n#$$ LANGUAGE plpython3u IMMUTABLE;\"\"\"\n# query = \"\"\"CREATE OR REPLACE FUNCTION normalize(mystr text)\n# RETURNS text\n# AS $$\n# from unidecode import unidecode\n# return unidecode(mystr.decode(\"utf-8\"))\n# $$ LANGUAGE plpythonu IMMUTABLE;\"\"\"\n query = \"\"\"CREATE OR REPLACE FUNCTION normalize(text)\n RETURNS text\nAS $$\n use Text::Unidecode;\n return unidecode(shift);\n$$ LANGUAGE plperlu IMMUTABLE;\"\"\"\n self.execute_sql(query)\n self.commit()\n self.close_database()\n",
"def _database_exists(self):\n \"\"\"\n Check if the database exists.\n\n \"\"\"\n con = psycopg2.connect(host=self.host, database=\"postgres\",\n user=self.user, password=self.password, port=self.port)\n query_check = \"select datname from pg_catalog.pg_database\"\n query_check += \" where datname = '{0}';\".format(self.dbname)\n c = con.cursor()\n c.execute(query_check)\n result = c.fetchall()\n if len(result) > 0:\n return True\n return False\n"
] |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query)
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def open_database(self):
"""
Opens the postgres database.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
def close_database(self):
"""
Closes the sqlite database.
"""
if self.con:
self.con.close()
self.con = None
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result
############################################### Private methods
def _database_exists(self):
"""
Check if the database exists.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector.create_index
|
python
|
def create_index(self, cardinality):
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
|
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L488-L522
|
[
"def create_index(self, cardinality):\n \"\"\"\n Create an index for the table with the given cardinality.\n\n Parameters\n ----------\n cardinality : int\n The cardinality to create a index for.\n\n \"\"\"\n for i in reversed(range(cardinality)):\n if i != 0:\n query = \"CREATE INDEX idx_{0}_gram_{1} ON _{0}_gram(word_{1});\".format(cardinality, i)\n self.execute_sql(query)\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def reset_database(self):
"""
Re-create an empty database.
"""
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query)
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def open_database(self):
"""
Opens the sqlite database.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
def close_database(self):
"""
Closes the sqlite database.
"""
if self.con:
self.con.close()
self.con = None
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result
############################################### Private methods
def _database_exists(self):
"""
Check if the database exists.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector.delete_index
|
python
|
def delete_index(self, cardinality):
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query)
|
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L524-L551
|
[
"def delete_index(self, cardinality):\n \"\"\"\n Delete index for the table with the given cardinality.\n\n Parameters\n ----------\n cardinality : int\n The cardinality of the index to delete.\n\n \"\"\"\n for i in reversed(range(cardinality)):\n if i != 0:\n query = \"DROP INDEX IF EXISTS idx_{0}_gram_{1};\".format(\n cardinality, i)\n self.execute_sql(query)\n",
"def execute_sql(self, query):\n \"\"\"\n Executes a given query string on an open postgres database.\n\n \"\"\"\n c = self.con.cursor()\n c.execute(query)\n result = []\n if c.rowcount > 0:\n try:\n result = c.fetchall()\n except psycopg2.ProgrammingError:\n pass\n return result\n"
] |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def reset_database(self):
"""
Re-create an empty database.
"""
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def open_database(self):
"""
Opens the sqlite database.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
def close_database(self):
"""
Closes the sqlite database.
"""
if self.con:
self.con.close()
self.con = None
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result
############################################### Private methods
def _database_exists(self):
"""
Check if the database exists.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector.open_database
|
python
|
def open_database(self):
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
|
Opens the sqlite database.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L560-L572
| null |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def reset_database(self):
"""
Re-create an empty database.
"""
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query)
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def close_database(self):
"""
Closes the sqlite database.
"""
if self.con:
self.con.close()
self.con = None
def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result
############################################### Private methods
def _database_exists(self):
"""
Check if the database exists.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector.execute_sql
|
python
|
def execute_sql(self, query):
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
except psycopg2.ProgrammingError:
pass
return result
|
Executes a given query string on an open postgres database.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L583-L596
| null |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def reset_database(self):
"""
Re-create an empty database.
"""
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality to create a index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query)
def commit(self):
"""
Sends a commit to the database.
"""
self.con.commit()
def open_database(self):
"""
Opens the sqlite database.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
def close_database(self):
"""
Closes the sqlite database.
"""
if self.con:
self.con.close()
self.con = None
############################################### Private methods
def _database_exists(self):
"""
Check if the database exists.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/dbconnector.py
|
PostgresDatabaseConnector._database_exists
|
python
|
def _database_exists(self):
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False
|
Check if the database exists.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L601-L615
| null |
class PostgresDatabaseConnector(DatabaseConnector):
"""
Database connector for postgres databases.
"""
def __init__(self, dbname, cardinality = 1, host = "localhost", port = 5432,
user = "postgres", password = None, connection = None):
"""
Constructor for the postgres database connector.
Parameters
----------
dbname : str
the database name
cardinality : int
default cardinality for n-grams
host : str
hostname of the postgres database
port : int
port number of the postgres database
user : str
user name for the postgres database
password: str
user password for the postgres database
connection : connection
an open database connection
"""
DatabaseConnector.__init__(self, dbname, cardinality)
self.con = connection
self.host = host
self.port = port
self.user = user
self.password = password
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database()
def reset_database(self):
"""
Re-create an empty database.
"""
if self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database()
def create_index(self, cardinality):
    """
    Create lookup indexes for the n-gram table of the given cardinality.

    Always adds a varchar_pattern_ops index on ``word`` (needed for
    LIKE-prefix scans).  Depending on ``self.lowercase`` and
    ``self.normalize``, functional indexes on LOWER()/NORMALIZE() are
    added so the WHERE clauses built by _build_where_like_clause can
    use index scans.

    Parameters
    ----------
    cardinality : int
        The cardinality to create a index for.
    """
    DatabaseConnector.create_index(self, cardinality)
    query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
    self.execute_sql(query)
    if self.lowercase:
        # Context columns word_1 .. word_{n-1} are compared with
        # LOWER(...) equality, so index the lowered values too.
        for i in reversed(range(cardinality)):
            if i != 0:
                query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
                self.execute_sql(query)
        if self.normalize:
            query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
            self.execute_sql(query)
        else:
            query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
            self.execute_sql(query)
    elif self.normalize:
        query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
        self.execute_sql(query)
def delete_index(self, cardinality):
    """
    Delete all indexes for the n-gram table of the given cardinality.

    Drops every index variant create_index may have produced; IF EXISTS
    makes each drop a no-op when that variant was never created.

    Parameters
    ----------
    cardinality : int
        The cardinality of the index to delete.
    """
    DatabaseConnector.delete_index(self, cardinality)
    query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
    self.execute_sql(query)
    query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
        cardinality)
    self.execute_sql(query)
    query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
        cardinality)
    self.execute_sql(query)
    query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
        format(cardinality)
    self.execute_sql(query)
    # Per-context-column lowered indexes (word_1 .. word_{n-1}).
    for i in reversed(range(cardinality)):
        if i != 0:
            query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
                cardinality, i)
            self.execute_sql(query)
def commit(self):
    """
    Commit the current transaction on the open database connection.
    """
    self.con.commit()
def open_database(self):
    """
    Open a connection to the postgres database if none is open yet.

    (The original docstring said "sqlite"; this connector uses
    psycopg2/postgres throughout.)  On failure the error is printed
    and ``self.con`` stays None.
    """
    if not self.con:
        try:
            self.con = psycopg2.connect(host=self.host,
                database=self.dbname, user=self.user,
                password=self.password, port=self.port)
        except psycopg2.Error as e:
            # Report on stdout; callers see self.con is still None.
            print("Error while opening database:")
            print(e.pgerror)
def close_database(self):
    """
    Close the postgres connection if one is open and clear ``self.con``.

    (The original docstring said "sqlite"; this connector is postgres.)
    """
    if self.con:
        self.con.close()
        self.con = None
def execute_sql(self, query):
    """
    Execute a query string on the open postgres connection.

    Returns a (possibly empty) list of result tuples.  Statements that
    affect rows but produce no result set raise ProgrammingError on
    fetchall(); that is swallowed so an empty list is returned.

    WARNING(review): callers assemble queries with str.format, which is
    SQL-injection prone for untrusted input -- parameterize upstream.
    """
    c = self.con.cursor()
    c.execute(query)
    result = []
    if c.rowcount > 0:
        try:
            result = c.fetchall()
        except psycopg2.ProgrammingError:
            # Non-SELECT statement: rowcount > 0 but nothing to fetch.
            pass
    return result
############################################### Private methods
def _build_where_like_clause(self, ngram):
where_clause = " WHERE"
for i in range(len(ngram)):
if i < (len(ngram) - 1):
if self.lowercase:
where_clause += " LOWER(word_{0}) = LOWER('{1}') AND".format(
len(ngram) - i - 1, ngram[i])
else:
where_clause += " word_{0} = '{1}' AND".format(
len(ngram) - i - 1, ngram[i])
else:
if ngram[-1] != "":
if self.lowercase:
if self. normalize:
where_clause += " NORMALIZE(LOWER(word)) LIKE NORMALIZE(LOWER('{0}%'))".format(ngram[-1])
else:
where_clause += " LOWER(word) LIKE LOWER('{0}%')".format(ngram[-1])
elif self.normalize:
where_clause += " NORMALIZE(word) LIKE NORMALIZE('{0}%')".format(ngram[-1])
else:
where_clause += " word LIKE '{0}%'".format(ngram[-1])
else:
# remove the " AND"
where_clause = where_clause[:-4]
return where_clause
|
cidles/pressagio
|
src/pressagio/tokenizer.py
|
Tokenizer.is_blankspace
|
python
|
def is_blankspace(self, char):
    """
    Return True if *char* is one of this tokenizer's blankspace
    characters, False otherwise.

    Raises TypeError when given a string longer than one character.
    """
    if len(char) > 1:
        raise TypeError("Expected a char.")
    return char in self.blankspaces
|
Test if a character is a blankspace.
Parameters
----------
char : str
The character to test.
Returns
-------
ret : bool
True if character is a blankspace, False otherwise.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/tokenizer.py#L56-L75
| null |
class Tokenizer(object):
    """
    Base class for all tokenizers.

    Concrete subclasses walk a character stream and hand out tokens one
    at a time via the abstract methods below.
    """

    # NOTE(review): Python-2 style ABC declaration; under Python 3 this
    # is an ordinary class attribute and does not enforce abstractness.
    __metaclass__ = abc.ABCMeta

    def __init__(self, stream, blankspaces = pressagio.character.blankspaces,
                 separators = pressagio.character.separators):
        """
        Constructor of the Tokenizer base class.

        Parameters
        ----------
        stream : str or io.IOBase
            The stream to tokenize. Can be a filename or any open IO stream.
        blankspaces : str
            The characters that represent empty spaces.
        separators : str
            The characters that separate token units (e.g. word boundaries).
        """
        self.separators = separators
        self.blankspaces = blankspaces
        self.lowercase = False  # subclasses may enable lowercasing
        # Stream offsets: beginning, current position, end.
        self.offbeg = 0
        self.offset = None
        self.offend = None

    def is_separator(self, char):
        """
        Test if a character is a separator.

        Parameters
        ----------
        char : str
            The character to test.

        Returns
        -------
        ret : bool
            True if character is a separator, False otherwise.
        """
        if len(char) > 1:
            raise TypeError("Expected a char.")
        if char in self.separators:
            return True
        return False

    @abc.abstractmethod
    def count_characters(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def reset_stream(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def count_tokens(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def has_more_tokens(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def next_token(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def progress(self):
        raise NotImplementedError("Method must be implemented")
|
cidles/pressagio
|
src/pressagio/tokenizer.py
|
Tokenizer.is_separator
|
python
|
def is_separator(self, char):
    """
    Return True if *char* is one of this tokenizer's separator
    characters, False otherwise.

    Raises TypeError when given a string longer than one character.
    """
    if len(char) > 1:
        raise TypeError("Expected a char.")
    return char in self.separators
|
Test if a character is a separator.
Parameters
----------
char : str
The character to test.
Returns
-------
ret : bool
True if character is a separator, False otherwise.
|
train
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/tokenizer.py#L77-L96
| null |
class Tokenizer(object):
    """
    Abstract base class shared by all tokenizer implementations.

    Subclasses provide the stream-walking behaviour through the
    abstract methods declared below.
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, stream, blankspaces = pressagio.character.blankspaces,
                 separators = pressagio.character.separators):
        """
        Constructor of the Tokenizer base class.

        Parameters
        ----------
        stream : str or io.IOBase
            The stream to tokenize. Can be a filename or any open IO stream.
        blankspaces : str
            The characters that represent empty spaces.
        separators : str
            The characters that separate token units (e.g. word boundaries).
        """
        self.blankspaces = blankspaces
        self.separators = separators
        self.lowercase = False
        # Offsets into the stream: begin, current, end.
        self.offbeg = 0
        self.offset = None
        self.offend = None

    def is_blankspace(self, char):
        """
        Test if a character is a blankspace.

        Parameters
        ----------
        char : str
            The character to test.

        Returns
        -------
        ret : bool
            True if character is a blankspace, False otherwise.
        """
        if len(char) > 1:
            raise TypeError("Expected a char.")
        return char in self.blankspaces

    @abc.abstractmethod
    def count_characters(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def reset_stream(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def count_tokens(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def has_more_tokens(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def next_token(self):
        raise NotImplementedError("Method must be implemented")

    @abc.abstractmethod
    def progress(self):
        raise NotImplementedError("Method must be implemented")
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_token
|
python
|
def get_token(self):
    """Acquire an OAuth bearer token for further API calls.

    Builds a JWT assertion signed with self.private_key (RS256) and
    posts it to the Einstein OAuth endpoint.  On HTTP 200 the token is
    stored on self.token; progress is currently reported via print.

    returns: the requests response object
    """
    payload = {
        'aud': API_OAUTH,
        'exp': time.time()+600, # token valid for 10 minutes
        'sub': self.email
    }
    header = {'Content-type':'application/x-www-form-urlencoded'}
    assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
    # NOTE(review): PyJWT >= 2 returns str, on which .decode() would
    # raise AttributeError; this assumes PyJWT 1.x (bytes) -- confirm.
    assertion = assertion.decode('utf-8')
    response = requests.post(
        url=API_OAUTH,
        headers=header,
        data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
    )
    print(response.text)
    if response.status_code == 200:
        print('status 200 ok for Token')
        self.token = response.json()['access_token']
    else:
        print('Could not get Token. Status: ' + str(response.status_code))
    return response
|
Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L48-L83
| null |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def check_for_token(self, token=None):
    """Return *token* when truthy, otherwise the token stored on self."""
    return token if token else self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """ Gets metadata on all models available for given dataset id

    :param dataset_id: string, previously obtained dataset id
    :param token: string, optional bearer token overriding the stored one
    :param url: string, optional override; must already contain the dataset
        id, since the '<dataset_id>' placeholder substitution is applied
        only to the default URL.

    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control':'no-cache'}
    if url != API_GET_MODELS:
        # BUG FIX: the original referenced ``the_url`` here before it was
        # ever assigned (NameError).  A caller-supplied url is used as-is.
        the_url = url
    else:
        the_url = url.replace('<dataset_id>', dataset_id)
    r = requests.get(the_url, headers=h)
    return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """ Deletes a previously created dataset.

    :param dataset_id: string or int, id of the dataset to delete
    :param token: string, optional bearer token overriding the stored one
    :param url: string, optional endpoint override

    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control':'no-cache'}
    # BUG FIX: the original custom-url branch referenced an undefined
    # ``the_url`` (NameError) and issued a GET instead of a DELETE.
    the_url = url + '/' + str(dataset_id)
    r = requests.delete(the_url, headers=h)
    return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """ Deletes a previously trained model.

    :param model_id: string, id of the model to delete
    :param token: string, optional bearer token overriding the stored one
    :param url: string, optional endpoint override

    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control':'no-cache'}
    # BUG FIX: the original custom-url branch referenced an undefined
    # ``the_url`` (NameError) and issued a GET instead of a DELETE.
    the_url = url + '/' + str(model_id)
    r = requests.delete(the_url, headers=h)
    return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_model_info
|
python
|
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """ Gets information about a specific previously trained model, ie: stats and accuracy

    :param model_id: string, model_id previously supplied by the API

    returns: requests object
    """
    bearer = 'Bearer ' + self.check_for_token(token)
    headers = {'Authorization': bearer, 'Cache-Control':'no-cache'}
    endpoint = url + '/' + model_id
    return requests.get(endpoint, headers=headers)
|
Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L93-L103
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_DATASETS_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(dataset_id)
r = requests.delete(the_url, headers=h)
return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODEL_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(model_id)
r = requests.delete(the_url, headers=h)
return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_datasets_info
|
python
|
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
|
Gets information on all datasets for this account
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L106-L115
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_DATASETS_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(dataset_id)
r = requests.delete(the_url, headers=h)
return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODEL_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(model_id)
r = requests.delete(the_url, headers=h)
return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_url_image_prediction
|
python
|
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
|
Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L118-L131
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
    """Classify *document* with a previously trained language model.

    :param model_id: ID of a trained model.
    :param document: body of text to be classified.
    :param token: optional bearer token; falls back to the stored one.
    :returns: the ``requests`` response object.
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    # (None, value) tuples are sent as plain multipart form fields.
    payload = {'modelId': (None, model_id), 'document': (None, document)}
    return requests.post(url, headers=headers, files=payload)
def parse_rectlabel_app_output(self):
    """Collect every RectLabel ``.json`` annotation file in the current
    directory and flatten them into CSV-ready rows.

    The first returned element is the header row; each following element is
    one line of CSV text: the quoted image filename followed by one
    double-quote-escaped JSON blob per bounding box.

    Returns ``None`` (after printing a notice) when no ``.json`` files exist.
    RectLabel info: https://rectlabel.com/
    """
    json_files = [name for name in os.listdir() if name[-5:] == '.json']
    if not json_files:
        print('No json files found in this directory')
        return None
    widest = 0  # largest box count seen; drives the header width
    csv_rows = []
    for json_name in json_files:
        with open(json_name, 'r') as handle:
            record = json.load(handle)
        boxes = record['objects']
        widest = max(widest, len(boxes))
        # One cell per box, prefixed by the quoted image filename.
        cells = ['\"' + record['filename'] + '\"']
        for box in boxes:
            geometry = {
                'label': box['label'],
                'x': box['x_y_w_h'][0],
                'y': box['x_y_w_h'][1],
                'width': box['x_y_w_h'][2],
                'height': box['x_y_w_h'][3],
            }
            # Escape embedded quotes CSV-style ("" inside a quoted field).
            cells.append('\"' + json.dumps(geometry).replace('"', '\"\"') + '\"')
        csv_rows.append(cells)
    flattened = [','.join(cells) for cells in csv_rows]
    header = '\"image\"'
    for index in range(widest):
        header += ', \"box\"' + str(index)
    flattened.insert(0, header)
    return flattened
def save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by parse_rectlabel_app_output to a CSV file,
    meant to accompany a set of picture files when creating an Object
    Detection dataset.

    :param output_filename: destination path; the default usually suffices.

    Does nothing (beyond the notice printed by the parser) when no RectLabel
    ``.json`` files are present, instead of crashing on the ``None`` result.
    """
    result = self.parse_rectlabel_app_output()
    if result is None:
        # Bug fix: parse_rectlabel_app_output returns None when no .json
        # files exist; the old code then raised TypeError iterating it.
        return
    # Context manager guarantees the handle is closed even if a write fails.
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by XML_parse_rectlabel_app_output to a CSV
    file, meant to accompany a set of picture files when creating an Object
    Detection dataset.  (Docstring fixed: this variant consumes the XML
    parser, not the JSON one.)

    :param output_filename: destination path; the default usually suffices.
    """
    result = self.XML_parse_rectlabel_app_output()
    # Context manager guarantees the handle is closed even if a write fails.
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
def XML_parse_rectlabel_app_output(self):
    """Collect every RectLabel/Pascal-VOC ``.xml`` annotation file in the
    current directory and flatten them into CSV-ready rows.

    The first returned element is the header row; each following element is
    one line of CSV text: the quoted image filename followed by one
    double-quote-escaped JSON blob per bounding box.  Box geometry is
    converted from (xmin, ymin, xmax, ymax) to (x, y, width, height).

    Fixes: removed leftover debug ``print`` statements.
    """
    files = [f for f in os.listdir() if f[-4:] == '.xml']
    max_boxes = 0
    rows = []
    for fname in files:
        root = ET.parse(fname).getroot()
        objects = root.findall('object')
        max_boxes = max(max_boxes, len(objects))
        row = ['\"' + root.find('filename').text + '\"']
        for o in objects:
            bndbox = o.find('bndbox')
            x_min = int(bndbox.find('xmin').text)
            y_min = int(bndbox.find('ymin').text)
            labels = {
                'label': o.find('name').text,
                'x': x_min,
                'y': y_min,
                'width': int(bndbox.find('xmax').text) - x_min,
                'height': int(bndbox.find('ymax').text) - y_min,
            }
            # Escape embedded quotes CSV-style ("" inside a quoted field).
            row.append('\"' + json.dumps(labels).replace('"', '\"\"') + '\"')
        rows.append(','.join(row))
    header = '\"image\"'
    for box_num in range(max_boxes):
        header += ', \"box\"' + str(box_num)
    rows.insert(0, header)
    return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete the dataset with the given ID.

    :param dataset_id: ID of a previously created dataset.
    :param token: optional bearer token; falls back to the stored one.
    :param url: override endpoint; a non-default value is used verbatim
        (the dataset id is NOT appended for you).
    :returns: the ``requests`` response object.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_DATASETS_INFO:
        # Bug fix: this branch referenced `the_url` before assignment
        # (guaranteed NameError) and issued a GET; use the caller-supplied
        # URL as-is with the DELETE verb this method exists to perform.
        return requests.delete(url, headers=h)
    the_url = url + '/' + str(dataset_id)
    r = requests.delete(the_url, headers=h)
    return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete the model with the given ID.

    :param model_id: ID of a previously trained model.
    :param token: optional bearer token; falls back to the stored one.
    :param url: override endpoint; a non-default value is used verbatim
        (the model id is NOT appended for you).
    :returns: the ``requests`` response object.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODEL_INFO:
        # Bug fix: this branch referenced `the_url` before assignment
        # (guaranteed NameError) and issued a GET; use the caller-supplied
        # URL as-is with the DELETE verb this method exists to perform.
        return requests.delete(url, headers=h)
    the_url = url + '/' + str(model_id)
    r = requests.delete(the_url, headers=h)
    return r
def get_list_of_all_models(self, token=None):
    """Return every model across all datasets for this account.

    Makes one API call per dataset, so it can take a while for accounts
    with many datasets.

    :param token: accepted for signature compatibility; the method refreshes
        the stored token itself before calling the API.
    :returns: list of model-metadata dicts.
    """
    # Bug fix: refresh the token BEFORE the first API call, not after --
    # previously get_datasets_info could run with a stale or missing token.
    self.get_token()
    d = self.get_datasets_info().json()['data']
    models = []
    print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for dataset in d:
        mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
# NOTE(review): this second definition carries the same name as the method
# directly above and therefore shadows it when both live in the same scope.
# Its first parameter is named `einstein` but plays the role of `self`.
# Unlike the copy above, it refreshes the token before the first API call.
# Consider deleting one of the two copies.
def get_list_of_all_models(einstein, token=None):
    """Return every model across all datasets, refreshing the token first.

    :param einstein: the service instance (acts as ``self``).
    :param token: unused; kept for signature compatibility.
    :returns: list of model-metadata dicts, one API call per dataset.
    """
    print('refreshing token...')
    einstein.get_token()
    d = einstein.get_datasets_info().json()['data']
    models = []
    print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
    for dataset in d:
        mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_fileb64_image_prediction
|
python
|
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
|
Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L134-L152
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
    """ A wrapper for Salesforce's Einstein Vision API.
    :param token: string, in case you obtained a token somewhere else and want to use it here.
    :param email: string, the username for your Einstein Vision account, not needed if you already have a token
    :param rsa_cert: string, most likely coming straight from the Heroku Config Vars
    :param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
    """

    def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
        self.token = token
        self.email = email
        if token is None:
            # Only load a signing key when we still have to mint a token.
            if rsa_cert is None:
                with open(pem_file, 'r') as pem:
                    self.private_key = pem.read()
            else:
                self.private_key = rsa_cert

    def get_token(self):
        """ Acquires a token for further API calls; unless you already have a
        token this is the first thing you do.  Sets self.token on success.
        returns: requests object
        """
        payload = {
            'aud': API_OAUTH,
            'exp': time.time() + 600,  # 10 minutes
            'sub': self.email
        }
        header = {'Content-type': 'application/x-www-form-urlencoded'}
        assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
        # Compat fix: PyJWT < 2 returns bytes, PyJWT >= 2 returns str;
        # the old unconditional .decode() breaks on newer PyJWT.
        if isinstance(assertion, bytes):
            assertion = assertion.decode('utf-8')
        response = requests.post(
            url=API_OAUTH,
            headers=header,
            data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=' + assertion
        )
        print(response.text)
        if response.status_code == 200:
            print('status 200 ok for Token')
            self.token = response.json()['access_token']
        else:
            print('Could not get Token. Status: ' + str(response.status_code))
        return response

    def check_for_token(self, token=None):
        """Return the given token when truthy, otherwise the stored one."""
        if token:
            return token
        else:
            return self.token

    def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
        """ Gets information about a specific previously trained model, ie: stats and accuracy
        :param model_id: string, model_id previously supplied by the API
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        the_url = url + '/' + model_id
        r = requests.get(the_url, headers=h)
        return r

    def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
        """ Gets information on all datasets for this account
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        r = requests.get(url, headers=h)
        return r

    def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
        """ Gets a prediction from a supplied picture url based on a previously trained model.
        :param model_id: string, once you train a model you'll be given a model id to use.
        :param picture_url: string, url pointing to a publicly accessible image file.
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        m = MultipartEncoder(fields={'sampleLocation': picture_url, 'modelId': model_id})
        h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
        r = requests.post(url, headers=h, data=m)
        return r

    def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
        """ Gets a prediction from a supplied image encoded as a b64 string, useful when
        uploading images to a server backed by this library.
        :param model_id: string, once you train a model you'll be given a model id to use.
        :param b64_encoded_string: string, a b64 encoded string representation of an image.
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        m = MultipartEncoder(fields={'sampleBase64Content': b64_encoded_string, 'modelId': model_id})
        h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
        r = requests.post(url, headers=h, data=m)
        return r

    def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
        """ Creates a dataset so you can train models from it
        :param file_url: string, url to an accessible zip file containing the necessary image
            files and folder structure indicating the labels to train. See docs online.
        :param dataset_type: string, one of 'image', 'image-detection', 'image-multi-label'
            (as of Nov 2017).
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        m = MultipartEncoder(fields={'type': dataset_type, 'path': file_url})
        h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
        r = requests.post(url, headers=h, data=m)
        return r

    def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
        """ Train a model given a specific dataset previously created.
        :param dataset_id: string, the id of a previously created dataset
        :param model_name: string, what you will call this model
        attention: training is asynchronous; a response returns before the model
        is finished. See get_training_status.
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        m = MultipartEncoder(fields={'name': model_name, 'datasetId': dataset_id})
        h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
        r = requests.post(url, headers=h, data=m)
        return r

    def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
        """ Gets status on the training process once you create a model
        :param model_id: string, id of the model to check
        returns: requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        the_url = url + '/' + model_id
        r = requests.get(the_url, headers=h)
        return r

    def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
        """ Gets metadata on all models available for given dataset id
        :param dataset_id: string, previously obtained dataset id
        warning: if providing your own url here, also include the dataset_id in the
        right place as this method will not include it for you.
        returns: a requests object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        if url != API_GET_MODELS:
            # Bug fix: this branch referenced `the_url` before assignment
            # (guaranteed NameError).  A caller-supplied URL is used verbatim.
            return requests.get(url, headers=h)
        the_url = url.replace('<dataset_id>', dataset_id)
        r = requests.get(the_url, headers=h)
        return r

    def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
        """ Creates a language dataset from a publicly accessible file stored in the cloud.
        :param file_url: string, URL to a file accessible on the cloud (Dropbox, S3, ...).
        warning: a default Google Drive link points at a download web page, NOT the file;
        convert it to a direct-download link first.
        returns: a request object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        # (None, value) tuples force plain multipart form fields.
        dummy_files = {'type': (None, 'text-intent'), 'path': (None, file_url)}
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        r = requests.post(url, headers=h, files=dummy_files)
        return r

    # example Google Drive direct download:
    # https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o

    def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
        """ Trains a model given a dataset and its ID.
        :param dataset_id: string, the ID for a dataset you created previously.
        :param name: string, name for your model.
        returns: a request object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        dummy_files = {'name': (None, name), 'datasetId': (None, dataset_id)}
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        r = requests.post(url, headers=h, files=dummy_files)
        return r

    def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
        """ Gets the status of your model, including whether the training has finished.
        :param model_id: string, the ID for a model you created previously.
        returns: a request object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        the_url = url + '/' + model_id
        r = requests.get(the_url, headers=h)
        return r

    def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
        """ Gets a prediction for a body of text from a trained model.
        :param model_id: string, the ID for a model you created previously.
        :param document: string, a body of text to be classified.
        returns: a request object
        """
        auth = 'Bearer ' + self.check_for_token(token)
        dummy_files = {'modelId': (None, model_id), 'document': (None, document)}
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        r = requests.post(url, headers=h, files=dummy_files)
        return r

    def parse_rectlabel_app_output(self):
        """Flatten every RectLabel ``.json`` annotation file in the current
        directory into CSV-ready rows (header first), or ``None`` when no
        ``.json`` files exist.  RectLabel info: https://rectlabel.com/
        """
        files = [f for f in os.listdir() if f[-5:] == '.json']
        if not files:
            print('No json files found in this directory')
            return None
        max_boxes = 0
        rows = []
        for each_file in files:
            with open(each_file, 'r') as handle:
                j = json.load(handle)
            max_boxes = max(max_boxes, len(j['objects']))
            # One CSV cell per box, prefixed by the quoted image filename.
            row = ['\"' + j['filename'] + '\"']
            for o in j['objects']:
                labels = {
                    'label': o['label'],
                    'x': o['x_y_w_h'][0],
                    'y': o['x_y_w_h'][1],
                    'width': o['x_y_w_h'][2],
                    'height': o['x_y_w_h'][3],
                }
                # Escape embedded quotes CSV-style ("" inside a quoted field).
                row.append('\"' + json.dumps(labels).replace('"', '\"\"') + '\"')
            rows.append(','.join(row))
        header = '\"image\"'
        for box_num in range(max_boxes):
            header += ', \"box\"' + str(box_num)
        rows.insert(0, header)
        return rows

    def save_parsed_data_to_csv(self, output_filename='output.csv'):
        """Write the rows from parse_rectlabel_app_output to a CSV file.
        Does nothing when no .json files were found (bug fix: previously
        crashed with TypeError iterating the None result).
        """
        result = self.parse_rectlabel_app_output()
        if result is None:
            return
        with open(output_filename, 'w', encoding='utf8') as ff:
            for line in result:
                ff.write(line + '\n')

    def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
        """Write the rows from XML_parse_rectlabel_app_output to a CSV file."""
        result = self.XML_parse_rectlabel_app_output()
        with open(output_filename, 'w', encoding='utf8') as ff:
            for line in result:
                ff.write(line + '\n')

    def XML_parse_rectlabel_app_output(self):
        """Flatten every RectLabel/Pascal-VOC ``.xml`` annotation file in the
        current directory into CSV-ready rows (header first).  Box geometry is
        converted from (xmin, ymin, xmax, ymax) to (x, y, width, height).
        Fixes: removed leftover debug prints.
        """
        files = [f for f in os.listdir() if f[-4:] == '.xml']
        max_boxes = 0
        rows = []
        for fname in files:
            root = ET.parse(fname).getroot()
            objects = root.findall('object')
            max_boxes = max(max_boxes, len(objects))
            row = ['\"' + root.find('filename').text + '\"']
            for o in objects:
                bndbox = o.find('bndbox')
                x_min = int(bndbox.find('xmin').text)
                y_min = int(bndbox.find('ymin').text)
                labels = {
                    'label': o.find('name').text,
                    'x': x_min,
                    'y': y_min,
                    'width': int(bndbox.find('xmax').text) - x_min,
                    'height': int(bndbox.find('ymax').text) - y_min,
                }
                row.append('\"' + json.dumps(labels).replace('"', '\"\"') + '\"')
            rows.append(','.join(row))
        header = '\"image\"'
        for box_num in range(max_boxes):
            header += ', \"box\"' + str(box_num)
        rows.insert(0, header)
        return rows

    def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
        """Delete the dataset with the given ID; a non-default url is used verbatim."""
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        if url != API_GET_DATASETS_INFO:
            # Bug fix: previously referenced `the_url` before assignment.
            return requests.delete(url, headers=h)
        the_url = url + '/' + str(dataset_id)
        r = requests.delete(the_url, headers=h)
        return r

    def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
        """Delete the model with the given ID; a non-default url is used verbatim."""
        auth = 'Bearer ' + self.check_for_token(token)
        h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
        if url != API_GET_MODEL_INFO:
            # Bug fix: previously referenced `the_url` before assignment.
            return requests.delete(url, headers=h)
        the_url = url + '/' + str(model_id)
        r = requests.delete(the_url, headers=h)
        return r

    def get_list_of_all_models(self, token=None):
        """Return every model across all datasets (one API call per dataset).

        Merged fix: the class previously defined this method twice (the second
        copy, with a first parameter named `einstein`, shadowed the first);
        this single version keeps the corrected ordering of refreshing the
        token BEFORE the first API call.
        """
        print('refreshing token...')
        self.get_token()
        d = self.get_datasets_info().json()['data']
        models = []
        print(f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
        for dataset in d:
            mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
            for model in mods['data']:
                models.append(model)
        return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_b64_image_prediction
|
python
|
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
|
Gets a prediction from a supplied image encoded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 encoded string representation of an image.
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L155-L172
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
    """Return *token* when a truthy override is supplied, else the stored one."""
    return token if token else self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
    """ Gets a prediction from a supplied image on your machine, by encoding the
    image data as b64 and posting to the API.
    :param model_id: string, once you train a model you'll be given a model id to use.
    :param filename: string, the name of a file to be posted to the api.
    returns: requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    with open(filename, "rb") as image_file:
        # Fix: base64.b64encode returns bytes; decode to str because the
        # multipart form field carries textual base64 (mirrors how
        # get_b64_image_prediction receives an already-decoded string).
        encoded_string = base64.b64encode(image_file.read()).decode('ascii')
    m = MultipartEncoder(fields={'sampleBase64Content': encoded_string, 'modelId': model_id})
    # Single headers dict (the original built a throwaway one first).
    h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
    r = requests.post(url, headers=h, data=m)
    return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """ Gets metadata on all models available for given dataset id
    :param dataset_id: string, previously obtained dataset id
    warning: if providing your own url here, also include the dataset_id in the
    right place as this method will not include it for you. Otherwise use the
    dataset_id attribute as per usual.
    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        # Bug fix: this branch referenced `the_url` before assignment
        # (guaranteed NameError).  A caller-supplied URL is used verbatim.
        return requests.get(url, headers=h)
    the_url = url.replace('<dataset_id>', dataset_id)
    r = requests.get(the_url, headers=h)
    return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
    """Classify a body of text with a previously trained language model.

    :param model_id: ID of a model created previously.
    :param document: the text to be classified.
    :returns: the ``requests`` response object.
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    # (None, value) tuples make requests send plain multipart form fields.
    form_fields = {'modelId': (None, model_id), 'document': (None, document)}
    return requests.post(url, headers=headers, files=form_fields)
def parse_rectlabel_app_output(self):
    """Collect every RectLabel ``.json`` annotation file in the current
    directory and flatten them into CSV rows.

    Returns a list of strings, one per CSV row; element 0 is the header
    row (``"image", "box"0, "box"1, ...``).  Each data row holds the
    quoted image filename followed by one JSON-encoded bounding box per
    column.  Returns ``None`` (after printing a notice) when the
    directory contains no ``.json`` files.

    Mostly internal, but useful for subsequent string manipulation,
    therefore not prefixed with an underscore.
    RectLabel info: https://rectlabel.com/
    """
    json_files = [f for f in os.listdir() if f.endswith('.json')]
    if not json_files:
        print('No json files found in this directory')
        return None
    max_boxes = 0
    rows = []
    for each_file in json_files:
        # Context manager guarantees the file handle is closed (the
        # original left handles open on a json parse error).
        with open(each_file, 'r') as fh:
            annotation = json.load(fh)
        boxes = annotation['objects']
        # Running count of the widest row, used to size the header.
        max_boxes = max(max_boxes, len(boxes))
        # Each json file becomes one row: filename first, then boxes.
        row = ['"' + annotation['filename'] + '"']
        for box in boxes:
            labels = {
                'label': box['label'],
                'x': box['x_y_w_h'][0],
                'y': box['x_y_w_h'][1],
                'width': box['x_y_w_h'][2],
                'height': box['x_y_w_h'][3],
            }
            # CSV-escape: wrap in quotes and double any embedded quotes.
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        rows.append(row)
    # One string per row.
    rows = [','.join(r) for r in rows]
    header = '"image"' + ''.join(
        ', "box"' + str(i) for i in range(max_boxes))
    rows.insert(0, header)
    return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by :meth:`parse_rectlabel_app_output` to a
    CSV file, one row per line.  This csv file is meant to accompany a
    set of picture files in the creation of an Object Detection dataset.

    Does nothing when no annotation files were found (the parser returned
    ``None``); the original code crashed in that case.

    :param output_filename: destination path, default makes sense but is
        configurable for your convenience.
    """
    rows = self.parse_rectlabel_app_output()
    if rows is None:
        # No .json files in the directory -- nothing to write.
        return
    # Context manager guarantees the file is flushed and closed.
    with open(output_filename, 'w', encoding='utf8') as out:
        for line in rows:
            out.write(line + '\n')
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by :meth:`XML_parse_rectlabel_app_output`
    to a CSV file, one row per line.  This csv file is meant to accompany
    a set of picture files in the creation of an Object Detection dataset.

    Does nothing if the parser produced no rows.

    :param output_filename: destination path, default makes sense but is
        configurable for your convenience.
    """
    rows = self.XML_parse_rectlabel_app_output()
    if rows is None:
        return
    # Context manager guarantees the file is flushed and closed.
    with open(output_filename, 'w', encoding='utf8') as out:
        for line in rows:
            out.write(line + '\n')
def XML_parse_rectlabel_app_output(self):
    """Collect every Pascal-VOC style ``.xml`` annotation file in the
    current directory (RectLabel's XML export) and flatten them into CSV
    rows.

    Returns a list of strings, one per CSV row; element 0 is the header
    row (``"image", "box"0, ...``).  Each data row holds the quoted image
    filename followed by one JSON-encoded bounding box per column.  With
    no ``.xml`` files present the list contains only the header.

    Leftover debug ``print`` calls from the original were removed.
    """
    xml_files = [f for f in os.listdir() if f.endswith('.xml')]
    max_boxes = 0
    rows = []
    for each_file in xml_files:
        root = ET.parse(each_file).getroot()
        objects = root.findall('object')
        # Running count of the widest row, used to size the header.
        max_boxes = max(max_boxes, len(objects))
        # Each xml file becomes one row: filename first, then boxes.
        row = ['"' + root.find('filename').text + '"']
        for obj in objects:
            bndbox = obj.find('bndbox')
            x = int(bndbox.find('xmin').text)
            y = int(bndbox.find('ymin').text)
            labels = {
                'label': obj.find('name').text,
                'x': x,
                'y': y,
                # VOC stores corners; convert (xmax, ymax) to width/height.
                'width': int(bndbox.find('xmax').text) - x,
                'height': int(bndbox.find('ymax').text) - y,
            }
            # CSV-escape: wrap in quotes and double any embedded quotes.
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        rows.append(row)
    # One string per row.
    rows = [','.join(r) for r in rows]
    header = '"image"' + ''.join(', "box"' + str(i) for i in range(max_boxes))
    rows.insert(0, header)
    return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete a dataset by ID.

    :param dataset_id: ID of the dataset to remove.
    :param url: base datasets endpoint; the dataset ID is appended to it.
    :returns: the ``requests`` response object.

    NOTE(review): the original issued a GET on an *undefined* ``the_url``
    variable (a guaranteed NameError) whenever a non-default ``url`` was
    passed; a DELETE on ``<url>/<id>`` is now performed for any base url,
    matching the only code path that ever worked.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(dataset_id)
    return requests.delete(the_url, headers=h)
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete a model by ID.

    :param model_id: ID of the model to remove.
    :param url: base models endpoint; the model ID is appended to it.
    :returns: the ``requests`` response object.

    NOTE(review): the original issued a GET on an *undefined* ``the_url``
    variable (a guaranteed NameError) whenever a non-default ``url`` was
    passed; a DELETE on ``<url>/<id>`` is now performed for any base url,
    matching the only code path that ever worked.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(model_id)
    return requests.delete(the_url, headers=h)
def get_list_of_all_models(self, token=None):
    """Return a flat list of model-metadata dicts across *all* datasets.

    Makes one ``get_models_info_for_dataset`` call per dataset, so it can
    be slow when there are many datasets.  ``token`` is accepted but
    unused.

    NOTE(review): this definition is shadowed by the second
    ``get_list_of_all_models`` defined immediately after it, so it is
    dead code as written.  Also note the token is refreshed only *after*
    the first ``get_datasets_info`` call -- that call relies on a
    pre-existing token.
    """
    d = self.get_datasets_info().json()['data']
    models = []
    self.get_token()
    print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for dataset in d:
        # One API call per dataset; accumulate every model record.
        mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
def get_list_of_all_models(einstein, token=None):
    """Return a flat list of model-metadata dicts across *all* datasets.

    Refreshes the auth token first, then performs one
    ``get_models_info_for_dataset`` call per dataset.

    NOTE(review): this redefinition overrides the previous method of the
    same name; the first parameter is named ``einstein`` but plays the
    role of ``self`` (it looks like a module-level helper pasted into the
    class).  ``token`` is accepted but never used.
    """
    print('refreshing token...')
    einstein.get_token()
    d = einstein.get_datasets_info().json()['data']
    models = []
    print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
    for dataset in d:
        # One API call per dataset; accumulate every model record.
        mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.create_dataset_synchronous
|
python
|
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
|
Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L175-L189
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
    """A wrapper for Salesforce's Einstein Vision API.

    :param token: string, in case you obtained a token somewhere else and want to use it here.
    :param email: string, the username for your Einstein Vision account, not needed if you already have a token
    :param rsa_cert: string, most likely coming straight from the Heroku Config Vars
    :param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
    """
    def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
        # Cached OAuth bearer token (may be None until get_token() runs)
        # and the account email used as the JWT subject.
        self.token = token
        self.email = email
        # The RSA private key is only needed -- and only loaded -- when no
        # token was supplied: prefer an in-memory cert string, otherwise
        # read the key from the .pem file on disk.
        if token is None:
            if rsa_cert is None:
                with open(pem_file, 'r') as pem:
                    self.private_key = pem.read()
            else:
                self.private_key = rsa_cert
def get_token(self):
    """Acquire an OAuth token for further API calls; unless you already
    have a token this is the first thing to do before using the service.

    Signs a short-lived JWT assertion with ``self.private_key`` using
    ``self.email`` as the subject and exchanges it at the OAuth endpoint.

    attention: sets ``self.token`` on success.
    attention: currently reports results via simple ``print`` calls.
    returns: requests object
    """
    payload = {
        'aud': API_OAUTH,
        'exp': time.time()+600, # 10 minutes
        'sub': self.email
    }
    header = {'Content-type':'application/x-www-form-urlencoded'}
    assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
    # NOTE(review): PyJWT < 2.0 returns bytes here, hence the decode();
    # PyJWT >= 2.0 returns str and this .decode() would raise
    # AttributeError -- confirm the pinned PyJWT version.
    assertion = assertion.decode('utf-8')
    response = requests.post(
        url=API_OAUTH,
        headers=header,
        data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
    )
    # NOTE(review): printing the raw response leaks the access token to
    # stdout/logs -- consider removing in production.
    print(response.text)
    if response.status_code == 200:
        print('status 200 ok for Token')
        self.token = response.json()['access_token']
    else:
        print('Could not get Token. Status: ' + str(response.status_code))
    return response
def check_for_token(self, token=None):
    """Return ``token`` when one is supplied (truthy), otherwise fall
    back to the token cached on this instance."""
    return token if token else self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """Get metadata on all models available for a given dataset id.

    :param dataset_id: previously obtained dataset ID.
    :param url: endpoint template containing a ``<dataset_id>``
        placeholder.  warning: if providing your own url, it is used
        verbatim -- embed the dataset id in the right place yourself, as
        this method will not include it for you.
    :returns: a ``requests`` response object.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        # Custom endpoint: the caller already embedded the dataset id.
        # (Bug fix: the original read an undefined ``the_url`` here,
        # raising NameError on every custom-url call.)
        return requests.get(url, headers=h)
    the_url = url.replace('<dataset_id>', dataset_id)
    return requests.get(the_url, headers=h)
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete a dataset by ID.

    :param dataset_id: ID of the dataset to remove.
    :param url: base datasets endpoint; the dataset ID is appended to it.
    :returns: the ``requests`` response object.

    NOTE(review): the original issued a GET on an *undefined* ``the_url``
    variable (a guaranteed NameError) whenever a non-default ``url`` was
    passed; a DELETE on ``<url>/<id>`` is now performed for any base url,
    matching the only code path that ever worked.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(dataset_id)
    return requests.delete(the_url, headers=h)
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete a model by ID.

    :param model_id: ID of the model to remove.
    :param url: base models endpoint; the model ID is appended to it.
    :returns: the ``requests`` response object.

    NOTE(review): the original issued a GET on an *undefined* ``the_url``
    variable (a guaranteed NameError) whenever a non-default ``url`` was
    passed; a DELETE on ``<url>/<id>`` is now performed for any base url,
    matching the only code path that ever worked.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(model_id)
    return requests.delete(the_url, headers=h)
def get_list_of_all_models(self, token=None):
    """Return a flat list of model-metadata dicts across *all* datasets.

    Makes one ``get_models_info_for_dataset`` call per dataset, so it can
    be slow when there are many datasets.  ``token`` is accepted but
    unused.

    NOTE(review): this definition is shadowed by the second
    ``get_list_of_all_models`` defined immediately after it, so it is
    dead code as written.  Also note the token is refreshed only *after*
    the first ``get_datasets_info`` call -- that call relies on a
    pre-existing token.
    """
    d = self.get_datasets_info().json()['data']
    models = []
    self.get_token()
    print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for dataset in d:
        # One API call per dataset; accumulate every model record.
        mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
def get_list_of_all_models(einstein, token=None):
    """Return a flat list of model-metadata dicts across *all* datasets.

    Refreshes the auth token first, then performs one
    ``get_models_info_for_dataset`` call per dataset.

    NOTE(review): this redefinition overrides the previous method of the
    same name; the first parameter is named ``einstein`` but plays the
    role of ``self`` (it looks like a module-level helper pasted into the
    class).  ``token`` is accepted but never used.
    """
    print('refreshing token...')
    einstein.get_token()
    d = einstein.get_datasets_info().json()['data']
    models = []
    print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
    for dataset in d:
        # One API call per dataset; accumulate every model record.
        mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.train_model
|
python
|
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
|
Train a model given a specific dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L192-L206
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """Get metadata on all models available for a given dataset id.

    :param dataset_id: previously obtained dataset ID.
    :param url: endpoint template containing a ``<dataset_id>``
        placeholder.  warning: if providing your own url, it is used
        verbatim -- embed the dataset id in the right place yourself, as
        this method will not include it for you.
    :returns: a ``requests`` response object.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        # Custom endpoint: the caller already embedded the dataset id.
        # (Bug fix: the original read an undefined ``the_url`` here,
        # raising NameError on every custom-url call.)
        return requests.get(url, headers=h)
    the_url = url.replace('<dataset_id>', dataset_id)
    return requests.get(the_url, headers=h)
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
    """Flatten every RectLabel .json annotation file in the current directory
    into CSV rows.

    Each json file becomes one row: the quoted image filename followed by one
    quoted-JSON cell per bounding box.  Element [0] of the returned list is
    the header row.  Returns None (after printing a notice) when the
    directory holds no .json files.

    RectLabel info: https://rectlabel.com/
    """
    files = [f for f in os.listdir() if f.endswith('.json')]
    if not files:
        print('No json files found in this directory')
        return None
    max_boxes = 0
    rows = []
    for each_file in files:
        # context manager closes the handle even if parsing raises
        with open(each_file, 'r') as fh:
            j = json.load(fh)
        # track the widest row so the header can name every box column
        max_boxes = max(max_boxes, len(j['objects']))
        row = []
        for o in j['objects']:
            labels = {
                'label': o['label'],
                'x': o['x_y_w_h'][0],
                'y': o['x_y_w_h'][1],
                'width': o['x_y_w_h'][2],
                'height': o['x_y_w_h'][3],
            }
            # CSV-escape: wrap the cell in quotes and double inner quotes
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        row.insert(0, '"' + j['filename'] + '"')
        rows.append(row)
    rows = [','.join(r) for r in rows]
    header = '"image"'
    for box_num in range(max_boxes):
        # bug fix: the number belongs inside the quotes ("box0"); the old
        # code emitted the malformed CSV cell "box"0
        header += ', "box' + str(box_num) + '"'
    rows.insert(0, header)
    return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by parse_rectlabel_app_output to a CSV file,
    meant to accompany a set of picture files when building an Object
    Detection dataset.

    :param output_filename: string, default makes sense, but for your convenience.
    """
    result = self.parse_rectlabel_app_output()
    # bug fix: parse_rectlabel_app_output returns None when no .json files
    # exist; the old code then crashed iterating over None
    if result is None:
        return
    # context manager guarantees the file is closed even if a write fails
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by XML_parse_rectlabel_app_output to a CSV
    file, meant to accompany a set of picture files when building an Object
    Detection dataset.

    :param output_filename: string, default makes sense, but for your convenience.
    """
    result = self.XML_parse_rectlabel_app_output()
    # context manager guarantees the file is closed even if a write fails
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
def XML_parse_rectlabel_app_output(self):
    """Flatten every PASCAL-VOC style .xml annotation file (RectLabel
    output) in the current directory into CSV rows.

    Mirrors parse_rectlabel_app_output: one row per xml file; element [0]
    of the returned list is the header row.
    """
    files = [f for f in os.listdir() if f.endswith('.xml')]
    max_boxes = 0
    rows = []
    for fname in files:
        root = ET.parse(fname).getroot()
        objects = root.findall('object')
        # track the widest row so the header can name every box column
        max_boxes = max(max_boxes, len(objects))
        row = []
        for o in objects:
            bnd = o.find('bndbox')
            x = int(bnd.find('xmin').text)
            y = int(bnd.find('ymin').text)
            labels = {
                'label': o.find('name').text,
                'x': x,
                'y': y,
                # VOC stores corners; convert xmax/ymax to width/height
                'width': int(bnd.find('xmax').text) - x,
                'height': int(bnd.find('ymax').text) - y,
            }
            # CSV-escape: wrap the cell in quotes and double inner quotes
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        row.insert(0, '"' + root.find('filename').text + '"')
        rows.append(row)
    # NOTE: leftover debug print() calls from the original were removed
    rows = [','.join(r) for r in rows]
    header = '"image"'
    for box_num in range(max_boxes):
        # bug fix: the number belongs inside the quotes ("box0"); the old
        # code emitted the malformed CSV cell "box"0
        header += ', "box' + str(box_num) + '"'
    rows.insert(0, header)
    return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete a dataset by id.

    :param dataset_id: id of the dataset to remove (appended to *url*).
    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # bug fix: the old custom-url branch referenced `the_url` before it was
    # assigned (guaranteed NameError) and issued a GET instead of a DELETE;
    # always build the target from url + id and DELETE it
    the_url = url + '/' + str(dataset_id)
    r = requests.delete(the_url, headers=h)
    return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete a trained model by id.

    :param model_id: id of the model to remove (appended to *url*).
    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # bug fix: the old custom-url branch referenced `the_url` before it was
    # assigned (guaranteed NameError) and issued a GET instead of a DELETE;
    # always build the target from url + id and DELETE it
    the_url = url + '/' + str(model_id)
    r = requests.delete(the_url, headers=h)
    return r
def get_list_of_all_models(self, token=None):
    """Return metadata for every model across all datasets in this account.

    Makes one API call per dataset, so it may take a while.
    returns: list of model metadata dicts
    """
    # bug fix: refresh the token *before* the first API call; the old code
    # called get_datasets_info() first, with a possibly missing/stale token
    self.get_token()
    d = self.get_datasets_info().json()['data']
    models = []
    print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for dataset in d:
        mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
def get_list_of_all_models(einstein, token=None):
    """Gather model metadata from every dataset on the given service.

    NOTE(review): this definition re-uses the name of the method above and
    will shadow it if both live in the same scope — confirm intent.

    :param einstein: an EinsteinVisionService instance.
    returns: list of model metadata dicts
    """
    print('refreshing token...')
    einstein.get_token()
    dataset_records = einstein.get_datasets_info().json()['data']
    collected = []
    print(f'\nfound {len(dataset_records)} datasets - will need same amount of callouts...\n')
    for rec in dataset_records:
        response = einstein.get_models_info_for_dataset(str(rec['id'])).json()
        collected.extend(response['data'])
    return collected
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_training_status
|
python
|
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
|
Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L209-L219
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
    # Credentials for later JWT-based token acquisition (see get_token).
    self.token = token
    self.email = email
    # Only load a signing key when no ready-made token was supplied;
    # prefer an in-memory cert (e.g. a Heroku config var) over the pem file.
    if token is None:
        if rsa_cert is None:
            with open(pem_file, 'r') as pem:
                self.private_key = pem.read()
        else:
            self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
    """Return *token* when it is truthy, otherwise the token stored on self."""
    # explicit per-call token wins over the one cached by get_token()
    return token or self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """Gets metadata on all models available for given dataset id.

    :param dataset_id: string, previously obtained dataset id
    warning: if providing your own url here, also include the dataset_id in
        the right place as this method will not include it for you.
        Otherwise use the dataset_id attribute as per usual.
    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        # bug fix: this branch previously read `the_url` before assignment
        # (NameError); per the docstring a caller-supplied url is used verbatim
        return requests.get(url, headers=h)
    the_url = url.replace('<dataset_id>', dataset_id)
    r = requests.get(the_url, headers=h)
    return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete a dataset by id.

    :param dataset_id: id of the dataset to remove (appended to *url*).
    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # bug fix: the old custom-url branch referenced `the_url` before it was
    # assigned (guaranteed NameError) and issued a GET instead of a DELETE;
    # always build the target from url + id and DELETE it
    the_url = url + '/' + str(dataset_id)
    r = requests.delete(the_url, headers=h)
    return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete a trained model by id.

    :param model_id: id of the model to remove (appended to *url*).
    returns: a requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # bug fix: the old custom-url branch referenced `the_url` before it was
    # assigned (guaranteed NameError) and issued a GET instead of a DELETE;
    # always build the target from url + id and DELETE it
    the_url = url + '/' + str(model_id)
    r = requests.delete(the_url, headers=h)
    return r
def get_list_of_all_models(self, token=None):
    """Return metadata for every model across all datasets in this account.

    Makes one API call per dataset, so it may take a while.
    returns: list of model metadata dicts
    """
    # bug fix: refresh the token *before* the first API call; the old code
    # called get_datasets_info() first, with a possibly missing/stale token
    self.get_token()
    d = self.get_datasets_info().json()['data']
    models = []
    print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for dataset in d:
        mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_models_info_for_dataset
|
python
|
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
|
Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L222-L239
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_DATASETS_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(dataset_id)
r = requests.delete(the_url, headers=h)
return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODEL_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(model_id)
r = requests.delete(the_url, headers=h)
return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.create_language_dataset_from_url
|
python
|
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
|
Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L242-L257
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_DATASETS_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(dataset_id)
r = requests.delete(the_url, headers=h)
return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODEL_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(model_id)
r = requests.delete(the_url, headers=h)
return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.train_language_model_from_dataset
|
python
|
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
|
Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L263-L275
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_DATASETS_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(dataset_id)
r = requests.delete(the_url, headers=h)
return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODEL_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(model_id)
r = requests.delete(the_url, headers=h)
return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_language_model_status
|
python
|
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
|
Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L278-L288
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_DATASETS_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(dataset_id)
r = requests.delete(the_url, headers=h)
return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODEL_INFO:
r = requests.get(the_url, headers=h)
return r
the_url = url + '/' + str(model_id)
r = requests.delete(the_url, headers=h)
return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.get_language_prediction_from_model
|
python
|
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
|
Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L291-L303
|
[
"def check_for_token(self, token=None): \n if token:\n return token\n else:\n return self.token\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
""" Gets metadata on all models available for given dataset id
:param dataset_id: string, previously obtained dataset id
warning: if providing your own url here, also include the dataset_id in the right place
as this method will not include it for you. Otherwise use the dataset_id attribute as
per usual
returns: a requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
if url != API_GET_MODELS:
r = requests.get(the_url, headers=h)
return r
the_url = url.replace('<dataset_id>', dataset_id)
r = requests.get(the_url, headers=h)
return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete a dataset.

    :param dataset_id: id of the dataset to delete (appended to *url*).
    :param token: optional bearer token; falls back to self.token.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # BUG FIX: when a custom url was supplied the original issued a GET on
    # an *undefined* variable (the_url), raising NameError. Always compose
    # the endpoint and issue the DELETE.
    the_url = url + '/' + str(dataset_id)
    return requests.delete(the_url, headers=h)
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete a trained model.

    :param model_id: id of the model to delete (appended to *url*).
    :param token: optional bearer token; falls back to self.token.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # BUG FIX: when a custom url was supplied the original issued a GET on
    # an *undefined* variable (the_url), raising NameError. Always compose
    # the endpoint and issue the DELETE.
    the_url = url + '/' + str(model_id)
    return requests.delete(the_url, headers=h)
def get_list_of_all_models(self, token=None):
    """Collect model metadata across every dataset on this account.

    Makes one API call per dataset, so it can take a while.

    :returns: list of model-info dicts.
    """
    datasets = self.get_datasets_info().json()['data']
    models = []
    self.get_token()
    print('Found ' + str(len(datasets)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for ds in datasets:
        payload = self.get_models_info_for_dataset(str(ds['id'])).json()
        models.extend(payload['data'])
    return models
def get_list_of_all_models(einstein, token=None):
    """Refresh the token, then collect model metadata for every dataset
    owned by *einstein* (an EinsteinVisionService instance).

    NOTE(review): this redefinition shadows the earlier
    get_list_of_all_models in the same scope — presumably unintentional;
    confirm which variant callers expect.

    :returns: list of model-info dicts.
    """
    print('refreshing token...')
    einstein.get_token()
    datasets = einstein.get_datasets_info().json()['data']
    models = []
    print(f'\nfound {len(datasets)} datasets - will need same amount of callouts...\n')
    for ds in datasets:
        mods = einstein.get_models_info_for_dataset(str(ds['id'])).json()
        models.extend(mods['data'])
    return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.parse_rectlabel_app_output
|
python
|
def parse_rectlabel_app_output(self):
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
|
Mostly for internal use: finds all .json files in the current folder, expecting each to have been produced by the RectLabel app,
and parses every file, finally returning an array that represents a csv file in which each element is a row and the first
element [0] holds the column headers.
It may be useful for subsequent string manipulation, and is therefore not prefixed with an underscore.
RectLabel info: https://rectlabel.com/
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L306-L364
| null |
class EinsteinVisionService:
    """A wrapper for Salesforce's Einstein Vision API.

    :param token: string, in case you obtained a token somewhere else and want to use it here.
    :param email: string, the username for your Einstein Vision account, not needed if you already have a token
    :param rsa_cert: string, most likely coming straight from the Heroku Config Vars
    :param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
    """

    def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
        self.token = token
        self.email = email
        # Key material is only needed when no ready-made token was supplied.
        if token is None:
            if rsa_cert is not None:
                self.private_key = rsa_cert
            else:
                with open(pem_file, 'r') as pem:
                    self.private_key = pem.read()
def get_token(self):
    """Obtain an OAuth token for further API calls; unless you already have
    a token this is the first thing to do.

    Side effects: sets self.token on success; prints progress to stdout.

    :returns: requests response object
    """
    claims = {
        'aud': API_OAUTH,
        'exp': time.time() + 600,  # token valid for 10 minutes
        'sub': self.email,
    }
    assertion = jwt.encode(claims, self.private_key, algorithm='RS256')
    # NOTE(review): .decode() assumes PyJWT 1.x, where encode() returns
    # bytes; PyJWT >= 2 returns str — confirm the pinned version.
    assertion = assertion.decode('utf-8')
    body = ('grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer'
            '&assertion=' + assertion)
    response = requests.post(
        url=API_OAUTH,
        headers={'Content-type': 'application/x-www-form-urlencoded'},
        data=body,
    )
    print(response.text)
    if response.status_code == 200:
        print('status 200 ok for Token')
        self.token = response.json()['access_token']
    else:
        print('Could not get Token. Status: ' + str(response.status_code))
    return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Fetch information (stats, accuracy) about a previously trained model.

    :param model_id: string, model id previously supplied by the API.
    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    return requests.get(url + '/' + model_id, headers=headers)
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
    """Fetch information on all datasets for this account.

    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    return requests.get(url, headers=headers)
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
    """Predict against a publicly reachable image URL using a trained model.

    :param model_id: string, id of the trained model to query.
    :param picture_url: string, public url of the image file.
    :returns: requests response object
    """
    encoder = MultipartEncoder(fields={'sampleLocation': picture_url, 'modelId': model_id})
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
        'Content-Type': encoder.content_type,
    }
    return requests.post(url, headers=headers, data=encoder)
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
    """Predict against a local image file by b64-encoding it and POSTing it.

    :param model_id: string, id of the trained model to query.
    :param filename: string, path of the image file to send.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    with open(filename, "rb") as image_file:
        # BUG FIX: b64encode() returns *bytes*; MultipartEncoder requires
        # str field values, so decode before posting.
        encoded_string = base64.b64encode(image_file.read()).decode('ascii')
    m = MultipartEncoder(fields={'sampleBase64Content': encoded_string, 'modelId': model_id})
    h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
    return requests.post(url, headers=h, data=m)
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
    """Predict against an image already encoded as a b64 string (handy when
    a server built on this library receives uploads).

    :param model_id: string, id of the trained model to query.
    :param b64_encoded_string: string, b64 representation of an image.
    :returns: requests response object
    """
    encoder = MultipartEncoder(
        fields={'sampleBase64Content': b64_encoded_string, 'modelId': model_id})
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
        'Content-Type': encoder.content_type,
    }
    return requests.post(url, headers=headers, data=encoder)
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
    """Create a dataset so models can be trained from it.

    :param file_url: string, url to an accessible zip of images whose folder
        structure encodes the labels (see online docs).
    :param dataset_type: string, one of 'image', 'image-detection',
        'image-multi-label' (options as of Nov 2017).
    :returns: requests response object
    """
    encoder = MultipartEncoder(fields={'type': dataset_type, 'path': file_url})
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
        'Content-Type': encoder.content_type,
    }
    return requests.post(url, headers=headers, data=encoder)
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
    """Start training a model from a previously created dataset.

    :param dataset_id: string, id of a previously created dataset.
    :param model_name: string, name to give the new model.
    attention: training continues after this call returns; poll
        get_training_status for completion.
    :returns: requests response object
    """
    encoder = MultipartEncoder(fields={'name': model_name, 'datasetId': dataset_id})
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
        'Content-Type': encoder.content_type,
    }
    return requests.post(url, headers=headers, data=encoder)
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
    """Check the training progress of a model created via train_model.

    :param model_id: string, id of the model to check.
    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    return requests.get(url + '/' + model_id, headers=headers)
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """Fetch metadata for every model belonging to *dataset_id*.

    :param dataset_id: string, previously obtained dataset id.
    warning: when you provide your own url, also embed the dataset id in it
        yourself; this method will not insert it for you.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        # BUG FIX: this branch referenced an undefined variable (the_url)
        # and raised NameError; a caller-supplied url is used verbatim.
        return requests.get(url, headers=h)
    the_url = url.replace('<dataset_id>', dataset_id)
    return requests.get(the_url, headers=h)
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
    """Create a text-intent dataset from a publicly accessible cloud file.

    :param file_url: string, URL of a file on the cloud (Dropbox, AWS S3,
        Google Drive, ...).
    warning: Google Drive's default share link points at a download *page*,
        NOT the raw file; rewrite it to a direct-download link first (this
        may change over time — search for the current workaround).
    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    # (None, value) tuples make requests send plain multipart form fields.
    form = {'type': (None, 'text-intent'), 'path': (None, file_url)}
    # example grive direct download:
    # https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
    return requests.post(url, headers=headers, files=form)
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
    """Start training a language model from an existing dataset.

    :param dataset_id: string, the ID of a dataset you created previously.
    :param name: string, name for the new model.
    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    # (None, value) tuples make requests send plain multipart form fields.
    form = {'name': (None, name), 'datasetId': (None, dataset_id)}
    return requests.post(url, headers=headers, files=form)
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
    """Check a language model's status, including training completion.

    :param model_id: string, the ID of a model you created previously.
    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    return requests.get(url + '/' + model_id, headers=headers)
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
    """Classify a body of text with a previously trained language model.

    :param model_id: string, the ID of a model you created previously.
    :param document: string, the body of text to classify.
    :returns: requests response object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    # (None, value) tuples make requests send plain multipart form fields.
    form = {'modelId': (None, model_id), 'document': (None, document)}
    return requests.post(url, headers=headers, files=form)
def parse_rectlabel_app_output(self):
    """Parse every RectLabel .json annotation file in the current folder
    into csv rows (mostly internal use; see https://rectlabel.com/).

    :returns: list of csv-row strings with element [0] as the header row,
        or None when the folder contains no .json files.
    """
    json_files = [f for f in os.listdir() if f[-5:] == '.json']
    if not json_files:
        print('No json files found in this directory')
        return None
    max_boxes = 0
    rows = []
    for fname in json_files:
        with open(fname, 'r') as fh:
            data = json.loads(fh.read())
        # track the widest row so the header has enough "box" columns
        max_boxes = max(max_boxes, len(data['objects']))
        row = []
        for o in data['objects']:
            xywh = o['x_y_w_h']
            labels = {'label': o['label'], 'x': xywh[0], 'y': xywh[1],
                      'width': xywh[2], 'height': xywh[3]}
            # embed the dict as a quoted csv field ("" escapes ")
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        row.insert(0, '"' + data['filename'] + '"')
        rows.append(row)
    # one string per csv row
    rows = [','.join(r) for r in rows]
    header = '"image"'
    for box_num in range(0, max_boxes):
        header += ', "box"' + str(box_num)
    rows.insert(0, header)
    return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by parse_rectlabel_app_output() to a csv file.

    The csv is meant to accompany a set of picture files when building an
    Object Detection dataset.

    :param output_filename: string, name of the csv file to create.
    """
    result = self.parse_rectlabel_app_output()
    # BUG FIX: parse_rectlabel_app_output() returns None when no .json
    # files exist; the original crashed iterating None.
    if result is None:
        return
    # 'with' closes the file even if a write raises (the original leaked
    # the handle on error).
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Write the rows produced by XML_parse_rectlabel_app_output() to a csv
    file meant to accompany picture files in an Object Detection dataset.

    :param output_filename: string, name of the csv file to create.
    """
    result = self.XML_parse_rectlabel_app_output()
    # 'with' closes the file even if a write raises (the original leaked
    # the handle on error).
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
def XML_parse_rectlabel_app_output(self):
    """Parse every PASCAL-VOC style .xml annotation file in the current
    directory (as written by the RectLabel app) into csv rows.

    :returns: list of csv-row strings; element [0] is the header row
        ("image", "box"0, "box"1, ...).
    """
    # BUG FIX: removed leftover debug print() calls that polluted stdout.
    xml_files = [f for f in os.listdir() if f[-4:] == '.xml']
    max_boxes = 0
    rows = []
    for fname in xml_files:
        root = ET.parse(fname).getroot()
        objects = root.findall('object')
        max_boxes = max(max_boxes, len(objects))
        row = []
        for o in objects:
            box = o.find('bndbox')
            x = int(box.find('xmin').text)
            y = int(box.find('ymin').text)
            labels = {
                'label': o.find('name').text,
                'x': x,
                'y': y,
                # VOC stores corner coordinates; the csv wants width/height.
                'width': int(box.find('xmax').text) - x,
                'height': int(box.find('ymax').text) - y,
            }
            # Embed the json dict as a quoted csv field ("" escapes ").
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        row.insert(0, '"' + root.find('filename').text + '"')
        rows.append(row)
    # one string per csv row
    rows = [','.join(r) for r in rows]
    header = '"image"'
    for box_num in range(0, max_boxes):
        header += ', "box"' + str(box_num)
    rows.insert(0, header)
    return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """Delete a dataset.

    :param dataset_id: id of the dataset to delete (appended to *url*).
    :param token: optional bearer token; falls back to self.token.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # BUG FIX: when a custom url was supplied the original issued a GET on
    # an *undefined* variable (the_url), raising NameError. Always compose
    # the endpoint and issue the DELETE.
    the_url = url + '/' + str(dataset_id)
    return requests.delete(the_url, headers=h)
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """Delete a trained model.

    :param model_id: id of the model to delete (appended to *url*).
    :param token: optional bearer token; falls back to self.token.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    # BUG FIX: when a custom url was supplied the original issued a GET on
    # an *undefined* variable (the_url), raising NameError. Always compose
    # the endpoint and issue the DELETE.
    the_url = url + '/' + str(model_id)
    return requests.delete(the_url, headers=h)
def get_list_of_all_models(self, token=None):
    """Collect model metadata across every dataset on this account.

    Makes one API call per dataset, so it can take a while.

    :returns: list of model-info dicts.
    """
    datasets = self.get_datasets_info().json()['data']
    models = []
    self.get_token()
    print('Found ' + str(len(datasets)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for ds in datasets:
        payload = self.get_models_info_for_dataset(str(ds['id'])).json()
        models.extend(payload['data'])
    return models
def get_list_of_all_models(einstein, token=None):
    """Refresh the token, then collect model metadata for every dataset
    owned by *einstein* (an EinsteinVisionService instance).

    NOTE(review): this redefinition shadows the earlier
    get_list_of_all_models in the same scope — presumably unintentional;
    confirm which variant callers expect.

    :returns: list of model-info dicts.
    """
    print('refreshing token...')
    einstein.get_token()
    datasets = einstein.get_datasets_info().json()['data']
    models = []
    print(f'\nfound {len(datasets)} datasets - will need same amount of callouts...\n')
    for ds in datasets:
        mods = einstein.get_models_info_for_dataset(str(ds['id'])).json()
        models.extend(mods['data'])
    return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.save_parsed_data_to_csv
|
python
|
def save_parsed_data_to_csv(self, output_filename='output.csv'):
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
|
Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L367-L379
|
[
"def parse_rectlabel_app_output(self):\n \"\"\" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app\n parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the\n column headers.\n Could be useful for subsequent string manipulation therefore not prefixed with an underscore\n RectLabel info: https://rectlabel.com/\n \"\"\"\n # get json files only\n files = []\n files = [f for f in os.listdir() if f[-5:] == '.json']\n\n if len(files) == 0:\n print('No json files found in this directory')\n return None\n\n max_boxes = 0 \n rows = []\n\n for each_file in files:\n f = open(each_file, 'r')\n j = f.read() \n j = json.loads(j) \n f.close()\n\n # running count of the # of boxes.\n if len(j['objects']) > max_boxes:\n max_boxes = len(j['objects'])\n\n # Each json file will end up being a row\n # set labels\n row = []\n\n for o in j['objects']:\n labels = {}\n labels['label'] = o['label']\n labels['x'] = o['x_y_w_h'][0]\n labels['y'] = o['x_y_w_h'][1]\n labels['width'] = o['x_y_w_h'][2]\n labels['height'] = o['x_y_w_h'][3]\n\n # String manipulation for csv\n labels_right_format = '\\\"' + json.dumps(labels).replace('\"', '\\\"\\\"') + '\\\"'\n\n row.append(labels_right_format)\n\n row.insert(0, '\\\"' + j['filename'] + '\\\"') \n\n rows.append(row)\n\n # one array element per row\n rows = [','.join(i) for i in rows]\n\n header = '\\\"image\\\"'\n\n for box_num in range(0, max_boxes):\n header += ', \\\"box\\\"' + str(box_num)\n\n rows.insert(0, header)\n return rows\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
""" Acquires a token for futher API calls, unless you already have a token this will be the first thing
you do before you use this.
:param email: string, the username for your EinsteinVision service, usually in email form
:para pem_file: string, file containing your Secret key. Copy contents of relevant Config Var
on Heroku to a file locally.
attention: this will set self.token on success
attention: currently spitting out results via a simple print
returns: requests object
"""
payload = {
'aud': API_OAUTH,
'exp': time.time()+600, # 10 minutes
'sub': self.email
}
header = {'Content-type':'application/x-www-form-urlencoded'}
assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
assertion = assertion.decode('utf-8')
response = requests.post(
url=API_OAUTH,
headers=header,
data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
)
print(response.text)
if response.status_code == 200:
print('status 200 ok for Token')
self.token = response.json()['access_token']
else:
print('Could not get Token. Status: ' + str(response.status_code))
return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
""" Gets information about a specific previously trained model, ie: stats and accuracy
:param model_id: string, model_id previously supplied by the API
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
""" Gets information on all datasets for this account
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.get(the_url, headers=h)
return r
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
    """Predict against a local image file by b64-encoding it and POSTing it.

    :param model_id: string, id of the trained model to query.
    :param filename: string, path of the image file to send.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    with open(filename, "rb") as image_file:
        # BUG FIX: b64encode() returns *bytes*; MultipartEncoder requires
        # str field values, so decode before posting.
        encoded_string = base64.b64encode(image_file.read()).decode('ascii')
    m = MultipartEncoder(fields={'sampleBase64Content': encoded_string, 'modelId': model_id})
    h = {'Authorization': auth, 'Cache-Control': 'no-cache', 'Content-Type': m.content_type}
    return requests.post(url, headers=h, data=m)
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """Fetch metadata for every model belonging to *dataset_id*.

    :param dataset_id: string, previously obtained dataset id.
    warning: when you provide your own url, also embed the dataset id in it
        yourself; this method will not insert it for you.
    :returns: requests response object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        # BUG FIX: this branch referenced an undefined variable (the_url)
        # and raised NameError; a caller-supplied url is used verbatim.
        return requests.get(url, headers=h)
    the_url = url.replace('<dataset_id>', dataset_id)
    return requests.get(the_url, headers=h)
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
    """Parse every RectLabel .json annotation file in the current folder
    into csv rows (mostly internal use; see https://rectlabel.com/).

    :returns: list of csv-row strings with element [0] as the header row,
        or None when the folder contains no .json files.
    """
    json_files = [f for f in os.listdir() if f[-5:] == '.json']
    if not json_files:
        print('No json files found in this directory')
        return None
    max_boxes = 0
    rows = []
    for fname in json_files:
        with open(fname, 'r') as fh:
            data = json.loads(fh.read())
        # track the widest row so the header has enough "box" columns
        max_boxes = max(max_boxes, len(data['objects']))
        row = []
        for o in data['objects']:
            xywh = o['x_y_w_h']
            labels = {'label': o['label'], 'x': xywh[0], 'y': xywh[1],
                      'width': xywh[2], 'height': xywh[3]}
            # embed the dict as a quoted csv field ("" escapes ")
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        row.insert(0, '"' + data['filename'] + '"')
        rows.append(row)
    # one string per csv row
    rows = [','.join(r) for r in rows]
    header = '"image"'
    for box_num in range(0, max_boxes):
        header += ', "box"' + str(box_num)
    rows.insert(0, header)
    return rows
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
    """ Write the rows produced by XML_parse_rectlabel_app_output to a CSV
    file. The file is meant to accompany a set of picture files in the
    creation of an Object Detection dataset.

    :param output_filename: string, default makes sense, but for your convenience.
    """
    result = self.XML_parse_rectlabel_app_output()
    # "with" closes the handle even if a write raises (the original
    # leaked the file object on error).
    with open(output_filename, 'w', encoding='utf8') as out:
        for line in result:
            out.write(line + '\n')
def XML_parse_rectlabel_app_output(self):
    """ Parse every RectLabel ``.xml`` (PASCAL-VOC style) annotation file
    in the current directory into CSV-ready rows.

    Returns a list of strings: element [0] is the header row
    (``"image", "box"0, ...``); each following element is one image's row,
    i.e. the quoted filename plus one escaped-JSON column per bounding box.

    Fix: removed the leftover debug ``print`` calls that spammed stdout
    for every file and every box.
    """
    xml_files = [f for f in os.listdir() if f.endswith('.xml')]

    max_boxes = 0
    rows = []
    for xml_file in xml_files:
        root = ET.parse(xml_file).getroot()
        objects = root.findall('object')
        # Track the widest row so the header has enough "box" columns.
        max_boxes = max(max_boxes, len(objects))

        row = ['"' + root.find('filename').text + '"']
        for obj in objects:
            bndbox = obj.find('bndbox')
            x = int(bndbox.find('xmin').text)
            y = int(bndbox.find('ymin').text)
            labels = {
                'label': obj.find('name').text,
                'x': x,
                'y': y,
                # VOC stores corner coordinates; convert to width/height.
                'width': int(bndbox.find('xmax').text) - x,
                'height': int(bndbox.find('ymax').text) - y,
            }
            # CSV-escape the embedded JSON (double the quotes).
            row.append('"' + json.dumps(labels).replace('"', '""') + '"')
        rows.append(row)

    # One string per row.
    rows = [','.join(r) for r in rows]
    header = '"image"'
    for box_num in range(max_boxes):
        header += ', "box"' + str(box_num)
    rows.insert(0, header)
    return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """ Delete a previously created dataset.

    :param dataset_id: string or int, the id of the dataset to delete.
    :param token: string, optional bearer token (falls back to the stored one).
    :param url: string, optional override of the datasets endpoint.
    returns: a request object

    Bug fix: the original custom-url branch issued a GET on an undefined
    ``the_url`` variable (guaranteed NameError); a DELETE is now always
    sent to ``<url>/<dataset_id>``.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(dataset_id)
    r = requests.delete(the_url, headers=h)
    return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """ Delete a previously trained model.

    :param model_id: string or int, the id of the model to delete.
    :param token: string, optional bearer token (falls back to the stored one).
    :param url: string, optional override of the models endpoint.
    returns: a request object

    Bug fix: the original custom-url branch issued a GET on an undefined
    ``the_url`` variable (guaranteed NameError); a DELETE is now always
    sent to ``<url>/<model_id>``.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(model_id)
    r = requests.delete(the_url, headers=h)
    return r
def get_list_of_all_models(self, token=None):
    """ Return every model across every dataset for this account.

    Makes one API call per dataset, so it can take a while.

    :param token: unused; kept for interface compatibility.
    returns: a list of model-metadata dicts

    Bug fix: the token is now refreshed *before* the first API call;
    the original refreshed it only after fetching the dataset list, so a
    stale stored token made that first call fail. (The sibling
    module-level variant of this function refreshes first, confirming
    the intended order.)
    """
    self.get_token()
    d = self.get_datasets_info().json()['data']
    models = []
    print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
    for dataset in d:
        mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
        for model in mods['data']:
            models.append(model)
    return models
def get_list_of_all_models(einstein, token=None):
    """ Gather the model metadata of every dataset owned by *einstein*.

    :param einstein: an EinsteinVisionService-like object.
    :param token: unused; kept for interface compatibility.
    :return: a flat list of model-metadata dicts.
    """
    print('refreshing token...')
    einstein.get_token()
    datasets = einstein.get_datasets_info().json()['data']
    models = []
    print (f'\nfound {len(datasets)} datasets - will need same amount of callouts...\n')
    for ds in datasets:
        response = einstein.get_models_info_for_dataset(str(ds['id']))
        models.extend(response.json()['data'])
    return models
|
feliperyan/EinsteinVisionPython
|
EinsteinVision/EinsteinVision.py
|
EinsteinVisionService.XML_save_parsed_data_to_csv
|
python
|
def XML_save_parsed_data_to_csv(self, output_filename='output.csv'):
result = self.XML_parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
|
Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
|
train
|
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L382-L394
|
[
"def XML_parse_rectlabel_app_output(self):\n files = []\n files = [f for f in os.listdir() if f[-4:] == '.xml']\n\n max_boxes = 0\n rows = [] \n\n for f in files:\n tree = ET.parse(f)\n root = tree.getroot()\n row = []\n objects = root.findall('object')\n print(objects)\n\n if len(objects) > max_boxes:\n max_boxes = len(objects)\n\n for o in objects:\n labels = {}\n labels['label'] = o.find('name').text\n labels['x'] = int(o.find('bndbox').find('xmin').text)\n labels['y'] = int(o.find('bndbox').find('ymin').text)\n labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']\n labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']\n print(labels)\n # String manipulation for csv\n labels_right_format = '\\\"' + json.dumps(labels).replace('\"', '\\\"\\\"') + '\\\"'\n\n row.append(labels_right_format)\n\n row.insert(0, '\\\"' + root.find('filename').text + '\\\"') \n\n rows.append(row)\n\n # one array element per row\n rows = [','.join(i) for i in rows]\n\n header = '\\\"image\\\"'\n\n for box_num in range(0, max_boxes):\n header += ', \\\"box\\\"' + str(box_num)\n\n rows.insert(0, header)\n return rows\n"
] |
class EinsteinVisionService:
""" A wrapper for Salesforce's Einstein Vision API.
:param token: string, in case you obtained a token somewhere else and want to use it here.
:param email: string, the username for your Enstein Vision account, not needed if you already have a token
:param rsa_cert: string, most likely coming straight from the Heroku Config Vars
:param pem_file: string, name of a file containing your secret key, defaults to predictive_services.pem
"""
def __init__(self, token=None, email=None, rsa_cert=None, pem_file='predictive_services.pem'):
self.token = token
self.email = email
if token is None:
if rsa_cert is None:
with open(pem_file, 'r') as pem:
self.private_key = pem.read()
else:
self.private_key = rsa_cert
def get_token(self):
    """ Acquires a token for further API calls; unless you already have a
    token this will be the first thing you do before you use this service.

    Uses the OAuth 2.0 JWT-bearer flow: a short-lived (10 minute) JWT
    signed with the account's RSA private key is exchanged for an
    access token at API_OAUTH.

    attention: this will set self.token on success
    attention: currently spitting out results via a simple print
    returns: requests object
    """
    payload = {
        'aud': API_OAUTH,
        'exp': time.time()+600, # 10 minutes
        'sub': self.email
    }
    header = {'Content-type':'application/x-www-form-urlencoded'}
    # Sign the claim set with the account's RSA key (RS256).
    assertion = jwt.encode(payload, self.private_key, algorithm='RS256')
    # NOTE(review): PyJWT 1.x returns bytes here, so decode() is needed;
    # PyJWT 2.x already returns str and this call would raise — confirm
    # the pinned PyJWT version.
    assertion = assertion.decode('utf-8')
    response = requests.post(
        url=API_OAUTH,
        headers=header,
        data='grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion='+assertion
    )
    print(response.text)
    if response.status_code == 200:
        print('status 200 ok for Token')
        # Cache the token for subsequent calls (see check_for_token).
        self.token = response.json()['access_token']
    else:
        print('Could not get Token. Status: ' + str(response.status_code))
    return response
def check_for_token(self, token=None):
if token:
return token
else:
return self.token
def get_model_info(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """ Gets information about a specific previously trained model,
    ie: stats and accuracy.

    :param model_id: string, model_id previously supplied by the API.
    :param token: string, optional bearer token (falls back to the stored one).
    :param url: string, optional endpoint override.
    returns: requests object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    return requests.get(url + '/' + model_id, headers=headers)
def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):
    """ Gets information on all datasets for this account.

    :param token: string, optional bearer token (falls back to the stored one).
    :param url: string, optional endpoint override.
    returns: requests object
    """
    headers = {
        'Authorization': 'Bearer ' + self.check_for_token(token),
        'Cache-Control': 'no-cache',
    }
    return requests.get(url, headers=headers)
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied picture url based on a previously trained model.
:param model_id: string, once you train a model you'll be given a model id to use.
:param picture_url: string, in the form of a url pointing to a publicly accessible
image file.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'sampleLocation':picture_url, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_fileb64_image_prediction(self, model_id, filename, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image on your machine, by encoding the image data as b64
and posting to the API.
:param model_id: string, once you train a model you'll be given a model id to use.
:param filename: string, the name of a file to be posted to the api.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
with open(filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL):
""" Gets a prediction from a supplied image enconded as a b64 string, useful when uploading
images to a server backed by this library.
:param model_id: string, once you train a model you'll be given a model id to use.
:param b64_encoded_string: string, a b64 enconded string representation of an image.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
encoded_string = b64_encoded_string
m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
r = requests.post(the_url, headers=h, data=m)
return r
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
""" Creates a dataset so you can train models from it
:param file_url: string, url to an accessible zip file containing the necessary image files
and folder structure indicating the labels to train. See docs online.
:param dataset_type: string, one of the dataset types, available options Nov 2017 were
'image', 'image-detection' and 'image-multi-label'.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'type':dataset_type, 'path':file_url})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
""" Train a model given a specifi dataset previously created
:param dataset_id: string, the id of a previously created dataset
:param model_name: string, what you will call this model
attention: This may take a while and a response will be returned before the model has
finished being trained. See docos and method get_training_status.
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
m = MultipartEncoder(fields={'name':model_name, 'datasetId':dataset_id})
h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type}
the_url = url
r = requests.post(the_url, headers=h, data=m)
return r
def get_training_status(self, model_id, token=None, url=API_TRAIN_MODEL):
""" Gets status on the training process once you create a model
:param model_id: string, id of the model to check
returns: requests object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_models_info_for_dataset(self, dataset_id, token=None, url=API_GET_MODELS):
    """ Gets metadata on all models available for given dataset id.

    :param dataset_id: string, previously obtained dataset id.
    :param token: string, optional bearer token (falls back to the stored one).
    :param url: optional endpoint override. warning: if providing your
        own url here, also include the dataset_id in the right place as
        this method will not include it for you.
    returns: a requests object

    Bug fix: the custom-url branch referenced ``the_url`` before
    assignment (guaranteed NameError); it now uses the supplied ``url``
    directly, as the docstring always promised.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    if url != API_GET_MODELS:
        the_url = url
    else:
        the_url = url.replace('<dataset_id>', dataset_id)
    r = requests.get(the_url, headers=h)
    return r
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
""" Creates a dataset from a publicly accessible file stored in the cloud.
:param file_url: string, in the form of a URL to a file accessible on the cloud.
Popular options include Dropbox, AWS S3, Google Drive.
warning: Google Drive by default gives you a link to a web ui that allows you to download a file
NOT to the file directly. There is a way to change the link to point directly to the file as of 2018
as this may change, please search google for a solution.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'type': (None, 'text-intent'), 'path':(None, file_url)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
# example grive direct download:
# https://drive.google.com/uc?export=download&id=1ETMujAjIQgXVnAL-e99rTbeVjZk_4j5o
def train_language_model_from_dataset(self, dataset_id, name, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Trains a model given a dataset and its ID.
:param dataset_id: string, the ID for a dataset you created previously.
:param name: string, name for your model.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'name': (None, name), 'datasetId':(None, dataset_id)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def get_language_model_status(self, model_id, token=None, url=API_TRAIN_LANGUAGE_MODEL):
""" Gets the status of your model, including whether the training has finished.
:param model_id: string, the ID for a model you created previously.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url + '/' + model_id
r = requests.get(the_url, headers=h)
return r
def get_language_prediction_from_model(self, model_id, document, token=None, url=API_GET_LANGUAGE_PREDICTION):
""" Gets a prediction based on a body of text you send to a trained model you created previously.
:param model_id: string, the ID for a model you created previously.
:param document: string, a body of text to be classified.
returns: a request object
"""
auth = 'Bearer ' + self.check_for_token(token)
dummy_files = {'modelId': (None, model_id), 'document':(None, document)}
h = {'Authorization': auth, 'Cache-Control':'no-cache'}
the_url = url
r = requests.post(the_url, headers=h, files=dummy_files)
return r
def parse_rectlabel_app_output(self):
""" Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app
parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the
column headers.
Could be useful for subsequent string manipulation therefore not prefixed with an underscore
RectLabel info: https://rectlabel.com/
"""
# get json files only
files = []
files = [f for f in os.listdir() if f[-5:] == '.json']
if len(files) == 0:
print('No json files found in this directory')
return None
max_boxes = 0
rows = []
for each_file in files:
f = open(each_file, 'r')
j = f.read()
j = json.loads(j)
f.close()
# running count of the # of boxes.
if len(j['objects']) > max_boxes:
max_boxes = len(j['objects'])
# Each json file will end up being a row
# set labels
row = []
for o in j['objects']:
labels = {}
labels['label'] = o['label']
labels['x'] = o['x_y_w_h'][0]
labels['y'] = o['x_y_w_h'][1]
labels['width'] = o['x_y_w_h'][2]
labels['height'] = o['x_y_w_h'][3]
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + j['filename'] + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def save_parsed_data_to_csv(self, output_filename='output.csv'):
""" Outputs a csv file in accordance with parse_rectlabel_app_output method. This csv file is meant to accompany a set of pictures files
in the creation of an Object Detection dataset.
:param output_filename string, default makes sense, but for your convenience.
"""
result = self.parse_rectlabel_app_output()
ff = open(output_filename, 'w', encoding='utf8')
for line in result:
ff.write(line + '\n')
ff.close()
def XML_parse_rectlabel_app_output(self):
files = []
files = [f for f in os.listdir() if f[-4:] == '.xml']
max_boxes = 0
rows = []
for f in files:
tree = ET.parse(f)
root = tree.getroot()
row = []
objects = root.findall('object')
print(objects)
if len(objects) > max_boxes:
max_boxes = len(objects)
for o in objects:
labels = {}
labels['label'] = o.find('name').text
labels['x'] = int(o.find('bndbox').find('xmin').text)
labels['y'] = int(o.find('bndbox').find('ymin').text)
labels['width'] = int(o.find('bndbox').find('xmax').text) - labels['x']
labels['height'] = int(o.find('bndbox').find('ymax').text) - labels['y']
print(labels)
# String manipulation for csv
labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"'
row.append(labels_right_format)
row.insert(0, '\"' + root.find('filename').text + '\"')
rows.append(row)
# one array element per row
rows = [','.join(i) for i in rows]
header = '\"image\"'
for box_num in range(0, max_boxes):
header += ', \"box\"' + str(box_num)
rows.insert(0, header)
return rows
def delete_dataset(self, dataset_id, token=None, url=API_GET_DATASETS_INFO):
    """ Delete a previously created dataset.

    :param dataset_id: string or int, the id of the dataset to delete.
    :param token: string, optional bearer token (falls back to the stored one).
    :param url: string, optional override of the datasets endpoint.
    returns: a request object

    Bug fix: the original custom-url branch issued a GET on an undefined
    ``the_url`` variable (guaranteed NameError); a DELETE is now always
    sent to ``<url>/<dataset_id>``.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(dataset_id)
    r = requests.delete(the_url, headers=h)
    return r
def delete_model(self, model_id, token=None, url=API_GET_MODEL_INFO):
    """ Delete a previously trained model.

    :param model_id: string or int, the id of the model to delete.
    :param token: string, optional bearer token (falls back to the stored one).
    :param url: string, optional override of the models endpoint.
    returns: a request object

    Bug fix: the original custom-url branch issued a GET on an undefined
    ``the_url`` variable (guaranteed NameError); a DELETE is now always
    sent to ``<url>/<model_id>``.
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url + '/' + str(model_id)
    r = requests.delete(the_url, headers=h)
    return r
def get_list_of_all_models(self, token=None):
d = self.get_datasets_info().json()['data']
models = []
self.get_token()
print('Found ' + str(len(d)) + ' datasets. An equal number of API calls will be made and it might take a while...')
for dataset in d:
mods = self.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
def get_list_of_all_models(einstein, token=None):
print('refreshing token...')
einstein.get_token()
d = einstein.get_datasets_info().json()['data']
models = []
print (f'\nfound {len(d)} datasets - will need same amount of callouts...\n')
for dataset in d:
mods = einstein.get_models_info_for_dataset(str(dataset['id'])).json()
for model in mods['data']:
models.append(model)
return models
|
snipsco/snipsmanagercore
|
snipsmanagercore/intent_parser.py
|
IntentParser.parse
|
python
|
def parse(payload, candidate_classes):
for cls in candidate_classes:
intent = cls.parse(payload)
if intent:
return intent
return None
|
Parse a json response into an intent.
:param payload: a JSON object representing an intent.
:param candidate_classes: a list of classes representing various
intents, each having their own `parse`
method to attempt parsing the JSON object
into the given intent class.
:return: An object version of the intent if one of the candidate
classes managed to parse it, or None.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L14-L29
| null |
class IntentParser:
""" Helper class for parsing intents. """
@staticmethod
@staticmethod
def get_intent_name(payload):
""" Return the simple intent name. An intent has the form:
{
"input": "turn the lights green",
"intent": {
"intentName": "user_BJW0GIoCx__Lights",
...
},
"slots": [...]
}
and this function extracts the last part of the intent
name ("Lights"), i.e. removing the user id.
:param payload: the intent, in JSON format.
:return: the simpe intent name.
"""
if 'intent' in payload and 'intentName' in payload['intent']:
# Snips (public) => IntentName
# public => username:IntentName
# private => private:IntentName
# private legacy => userId__IntentName
return payload['intent']['intentName'].split('__')[-1].split(":")[-1]
return None
@staticmethod
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result
@staticmethod
def parse_instant_time(slot):
    """ Parse a slot into an InstantTime object.
    Sample response:
    {
        "entity": "snips/datetime",
        "range": {
            "end": 36,
            "start": 28
        },
        "rawValue": "tomorrow",
        "slotName": "weatherForecastStartDatetime",
        "value": {
            "grain": "Day",
            "kind": "InstantTime",
            "precision": "Exact",
            "value": "2017-09-15 00:00:00 +00:00"
        }
    }
    :param slot: an intent slot.
    :return: a parsed InstantTime object, or None.
    """
    # The raw datetime string lives at slot['value']['value'];
    # get_dict_value returns None when the path is missing.
    date = IntentParser.get_dict_value(slot, ['value', 'value'])
    if not date:
        return None
    # NOTE(review): parse() is presumably dateutil.parser.parse — confirm;
    # it raises on unparseable input rather than returning a falsy value,
    # so the re-check below mainly guards other parse implementations.
    date = parse(date)
    if not date:
        return None
    # Granularity ("Day"/"Week"); parse_grain falls back to day when the
    # grain key is absent.
    grain = InstantTime.parse_grain(
        IntentParser.get_dict_value(slot,
                                    ['value', 'grain']))
    return InstantTime(date, grain)
@staticmethod
def parse_time_interval(slot):
    """ Parse a slot into a TimeInterval object.
    Sample response:
    {
        "entity": "snips/datetime",
        "range": {
            "end": 42,
            "start": 13
        },
        "rawValue": "between tomorrow and saturday",
        "slotName": "weatherForecastStartDatetime",
        "value": {
            "from": "2017-09-15 00:00:00 +00:00",
            "kind": "TimeInterval",
            "to": "2017-09-17 00:00:00 +00:00"
        }
    }
    :param slot: an intent slot.
    :return: a parsed TimeInterval object, or None.
    """
    # Interval endpoints live at slot['value']['from'] / ['to'];
    # get_dict_value returns None when a path is missing.
    start = IntentParser.get_dict_value(
        slot, ['value', 'from'])
    end = IntentParser.get_dict_value(slot, ['value', 'to'])
    if not start or not end:
        return None
    # NOTE(review): parse() is presumably dateutil.parser.parse — it
    # raises on unparseable input rather than returning None, so the
    # falsy re-check below mainly guards other parse implementations.
    start = parse(start)
    end = parse(end)
    if not start or not end:
        return None
    return TimeInterval(start, end)
@staticmethod
def get_dict_value(dictionary, path):
""" Safely get the value of a dictionary given a key path. For
instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at
key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at
key path ['a', 'b', 'c'] is None.
:param dictionary: a dictionary.
:param path: the key path.
:return: The value of d at the given key path, or None if the key
path does not exist.
"""
if len(path) == 0:
return None
temp_dictionary = dictionary
try:
for k in path:
temp_dictionary = temp_dictionary[k]
return temp_dictionary
except (KeyError, TypeError):
pass
return None
@staticmethod
def get_session_id(payload):
result = None
if 'sessionID' in payload:
result = payload['sessionID']
return result
@staticmethod
def get_site_id(payload):
result = None
if 'siteID' in payload:
result = payload['siteID']
return result
@staticmethod
def get_custom_data(payload):
result = None
if 'customData' in payload:
result = payload['customData']
return result
|
snipsco/snipsmanagercore
|
snipsmanagercore/intent_parser.py
|
IntentParser.get_slot_value
|
python
|
def get_slot_value(payload, slot_name):
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result
|
Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L59-L133
|
[
"def parse_instant_time(slot):\n \"\"\" Parse a slot into an InstantTime object.\n\n Sample response:\n\n {\n \"entity\": \"snips/datetime\",\n \"range\": {\n \"end\": 36,\n \"start\": 28\n },\n \"rawValue\": \"tomorrow\",\n \"slotName\": \"weatherForecastStartDatetime\",\n \"value\": {\n \"grain\": \"Day\",\n \"kind\": \"InstantTime\",\n \"precision\": \"Exact\",\n \"value\": \"2017-09-15 00:00:00 +00:00\"\n }\n }\n\n :param slot: a intent slot.\n :return: a parsed InstantTime object, or None.\n \"\"\"\n date = IntentParser.get_dict_value(slot, ['value', 'value'])\n if not date:\n return None\n date = parse(date)\n if not date:\n return None\n grain = InstantTime.parse_grain(\n IntentParser.get_dict_value(slot,\n ['value', 'grain']))\n return InstantTime(date, grain)\n",
"def parse_time_interval(slot):\n \"\"\" Parse a slot into a TimeInterval object.\n\n Sample response:\n\n {\n \"entity\": \"snips/datetime\",\n \"range\": {\n \"end\": 42,\n \"start\": 13\n },\n \"rawValue\": \"between tomorrow and saturday\",\n \"slotName\": \"weatherForecastStartDatetime\",\n \"value\": {\n \"from\": \"2017-09-15 00:00:00 +00:00\",\n \"kind\": \"TimeInterval\",\n \"to\": \"2017-09-17 00:00:00 +00:00\"\n }\n }\n\n :param slot: a intent slot.\n :return: a parsed TimeInterval object, or None.\n \"\"\"\n start = IntentParser.get_dict_value(\n slot, ['value', 'from'])\n end = IntentParser.get_dict_value(slot, ['value', 'to'])\n if not start or not end:\n return None\n start = parse(start)\n end = parse(end)\n if not start or not end:\n return None\n return TimeInterval(start, end)\n",
"def get_dict_value(dictionary, path):\n \"\"\" Safely get the value of a dictionary given a key path. For\n instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at\n key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at\n key path ['a', 'b', 'c'] is None.\n\n :param dictionary: a dictionary.\n :param path: the key path.\n :return: The value of d at the given key path, or None if the key\n path does not exist.\n \"\"\"\n if len(path) == 0:\n return None\n temp_dictionary = dictionary\n try:\n for k in path:\n temp_dictionary = temp_dictionary[k]\n return temp_dictionary\n except (KeyError, TypeError):\n pass\n return None\n"
] |
class IntentParser:
""" Helper class for parsing intents. """
@staticmethod
def parse(payload, candidate_classes):
""" Parse a json response into an intent.
:param payload: a JSON object representing an intent.
:param candidate_classes: a list of classes representing various
intents, each having their own `parse`
method to attempt parsing the JSON object
into the given intent class.
:return: An object version of the intent if one of the candidate
classes managed to parse it, or None.
"""
for cls in candidate_classes:
intent = cls.parse(payload)
if intent:
return intent
return None
@staticmethod
def get_intent_name(payload):
""" Return the simple intent name. An intent has the form:
{
"input": "turn the lights green",
"intent": {
"intentName": "user_BJW0GIoCx__Lights",
...
},
"slots": [...]
}
and this function extracts the last part of the intent
name ("Lights"), i.e. removing the user id.
:param payload: the intent, in JSON format.
:return: the simple intent name.
"""
if 'intent' in payload and 'intentName' in payload['intent']:
# Snips (public) => IntentName
# public => username:IntentName
# private => private:IntentName
# private legacy => userId__IntentName
return payload['intent']['intentName'].split('__')[-1].split(":")[-1]
return None
@staticmethod
@staticmethod
def parse_instant_time(slot):
""" Parse a slot into an InstantTime object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 36,
"start": 28
},
"rawValue": "tomorrow",
"slotName": "weatherForecastStartDatetime",
"value": {
"grain": "Day",
"kind": "InstantTime",
"precision": "Exact",
"value": "2017-09-15 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed InstantTime object, or None.
"""
date = IntentParser.get_dict_value(slot, ['value', 'value'])
if not date:
return None
date = parse(date)
if not date:
return None
grain = InstantTime.parse_grain(
IntentParser.get_dict_value(slot,
['value', 'grain']))
return InstantTime(date, grain)
@staticmethod
def parse_time_interval(slot):
""" Parse a slot into a TimeInterval object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 42,
"start": 13
},
"rawValue": "between tomorrow and saturday",
"slotName": "weatherForecastStartDatetime",
"value": {
"from": "2017-09-15 00:00:00 +00:00",
"kind": "TimeInterval",
"to": "2017-09-17 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed TimeInterval object, or None.
"""
start = IntentParser.get_dict_value(
slot, ['value', 'from'])
end = IntentParser.get_dict_value(slot, ['value', 'to'])
if not start or not end:
return None
start = parse(start)
end = parse(end)
if not start or not end:
return None
return TimeInterval(start, end)
@staticmethod
def get_dict_value(dictionary, path):
""" Safely get the value of a dictionary given a key path. For
instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at
key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at
key path ['a', 'b', 'c'] is None.
:param dictionary: a dictionary.
:param path: the key path.
:return: The value of d at the given key path, or None if the key
path does not exist.
"""
if len(path) == 0:
return None
temp_dictionary = dictionary
try:
for k in path:
temp_dictionary = temp_dictionary[k]
return temp_dictionary
except (KeyError, TypeError):
pass
return None
@staticmethod
def get_session_id(payload):
result = None
if 'sessionID' in payload:
result = payload['sessionID']
return result
@staticmethod
def get_site_id(payload):
result = None
if 'siteID' in payload:
result = payload['siteID']
return result
@staticmethod
def get_custom_data(payload):
result = None
if 'customData' in payload:
result = payload['customData']
return result
|
snipsco/snipsmanagercore
|
snipsmanagercore/intent_parser.py
|
IntentParser.parse_instant_time
|
python
|
def parse_instant_time(slot):
date = IntentParser.get_dict_value(slot, ['value', 'value'])
if not date:
return None
date = parse(date)
if not date:
return None
grain = InstantTime.parse_grain(
IntentParser.get_dict_value(slot,
['value', 'grain']))
return InstantTime(date, grain)
|
Parse a slot into an InstantTime object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 36,
"start": 28
},
"rawValue": "tomorrow",
"slotName": "weatherForecastStartDatetime",
"value": {
"grain": "Day",
"kind": "InstantTime",
"precision": "Exact",
"value": "2017-09-15 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed InstantTime object, or None.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L136-L169
|
[
"def parse_grain(grain):\n \"\"\" Parse a string to a granularity, e.g. \"Day\" to InstantTime.day.\n\n :param grain: a string representing a granularity.\n \"\"\"\n if not grain:\n return InstantTime.day\n if grain.lower() == 'week':\n return InstantTime.week\n return InstantTime.day\n",
"def get_dict_value(dictionary, path):\n \"\"\" Safely get the value of a dictionary given a key path. For\n instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at\n key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at\n key path ['a', 'b', 'c'] is None.\n\n :param dictionary: a dictionary.\n :param path: the key path.\n :return: The value of d at the given key path, or None if the key\n path does not exist.\n \"\"\"\n if len(path) == 0:\n return None\n temp_dictionary = dictionary\n try:\n for k in path:\n temp_dictionary = temp_dictionary[k]\n return temp_dictionary\n except (KeyError, TypeError):\n pass\n return None\n"
] |
class IntentParser:
""" Helper class for parsing intents. """
@staticmethod
def parse(payload, candidate_classes):
""" Parse a json response into an intent.
:param payload: a JSON object representing an intent.
:param candidate_classes: a list of classes representing various
intents, each having their own `parse`
method to attempt parsing the JSON object
into the given intent class.
:return: An object version of the intent if one of the candidate
classes managed to parse it, or None.
"""
for cls in candidate_classes:
intent = cls.parse(payload)
if intent:
return intent
return None
@staticmethod
def get_intent_name(payload):
""" Return the simple intent name. An intent has the form:
{
"input": "turn the lights green",
"intent": {
"intentName": "user_BJW0GIoCx__Lights",
...
},
"slots": [...]
}
and this function extracts the last part of the intent
name ("Lights"), i.e. removing the user id.
:param payload: the intent, in JSON format.
:return: the simpe intent name.
"""
if 'intent' in payload and 'intentName' in payload['intent']:
# Snips (public) => IntentName
# public => username:IntentName
# private => private:IntentName
# private legacy => userId__IntentName
return payload['intent']['intentName'].split('__')[-1].split(":")[-1]
return None
@staticmethod
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result
@staticmethod
@staticmethod
def parse_time_interval(slot):
""" Parse a slot into a TimeInterval object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 42,
"start": 13
},
"rawValue": "between tomorrow and saturday",
"slotName": "weatherForecastStartDatetime",
"value": {
"from": "2017-09-15 00:00:00 +00:00",
"kind": "TimeInterval",
"to": "2017-09-17 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed TimeInterval object, or None.
"""
start = IntentParser.get_dict_value(
slot, ['value', 'from'])
end = IntentParser.get_dict_value(slot, ['value', 'to'])
if not start or not end:
return None
start = parse(start)
end = parse(end)
if not start or not end:
return None
return TimeInterval(start, end)
@staticmethod
def get_dict_value(dictionary, path):
""" Safely get the value of a dictionary given a key path. For
instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at
key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at
key path ['a', 'b', 'c'] is None.
:param dictionary: a dictionary.
:param path: the key path.
:return: The value of d at the given key path, or None if the key
path does not exist.
"""
if len(path) == 0:
return None
temp_dictionary = dictionary
try:
for k in path:
temp_dictionary = temp_dictionary[k]
return temp_dictionary
except (KeyError, TypeError):
pass
return None
@staticmethod
def get_session_id(payload):
result = None
if 'sessionID' in payload:
result = payload['sessionID']
return result
@staticmethod
def get_site_id(payload):
result = None
if 'siteID' in payload:
result = payload['siteID']
return result
@staticmethod
def get_custom_data(payload):
result = None
if 'customData' in payload:
result = payload['customData']
return result
|
snipsco/snipsmanagercore
|
snipsmanagercore/intent_parser.py
|
IntentParser.parse_time_interval
|
python
|
def parse_time_interval(slot):
start = IntentParser.get_dict_value(
slot, ['value', 'from'])
end = IntentParser.get_dict_value(slot, ['value', 'to'])
if not start or not end:
return None
start = parse(start)
end = parse(end)
if not start or not end:
return None
return TimeInterval(start, end)
|
Parse a slot into a TimeInterval object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 42,
"start": 13
},
"rawValue": "between tomorrow and saturday",
"slotName": "weatherForecastStartDatetime",
"value": {
"from": "2017-09-15 00:00:00 +00:00",
"kind": "TimeInterval",
"to": "2017-09-17 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed TimeInterval object, or None.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L172-L204
|
[
"def get_dict_value(dictionary, path):\n \"\"\" Safely get the value of a dictionary given a key path. For\n instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at\n key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at\n key path ['a', 'b', 'c'] is None.\n\n :param dictionary: a dictionary.\n :param path: the key path.\n :return: The value of d at the given key path, or None if the key\n path does not exist.\n \"\"\"\n if len(path) == 0:\n return None\n temp_dictionary = dictionary\n try:\n for k in path:\n temp_dictionary = temp_dictionary[k]\n return temp_dictionary\n except (KeyError, TypeError):\n pass\n return None\n"
] |
class IntentParser:
""" Helper class for parsing intents. """
@staticmethod
def parse(payload, candidate_classes):
""" Parse a json response into an intent.
:param payload: a JSON object representing an intent.
:param candidate_classes: a list of classes representing various
intents, each having their own `parse`
method to attempt parsing the JSON object
into the given intent class.
:return: An object version of the intent if one of the candidate
classes managed to parse it, or None.
"""
for cls in candidate_classes:
intent = cls.parse(payload)
if intent:
return intent
return None
@staticmethod
def get_intent_name(payload):
""" Return the simple intent name. An intent has the form:
{
"input": "turn the lights green",
"intent": {
"intentName": "user_BJW0GIoCx__Lights",
...
},
"slots": [...]
}
and this function extracts the last part of the intent
name ("Lights"), i.e. removing the user id.
:param payload: the intent, in JSON format.
:return: the simpe intent name.
"""
if 'intent' in payload and 'intentName' in payload['intent']:
# Snips (public) => IntentName
# public => username:IntentName
# private => private:IntentName
# private legacy => userId__IntentName
return payload['intent']['intentName'].split('__')[-1].split(":")[-1]
return None
@staticmethod
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result
@staticmethod
def parse_instant_time(slot):
""" Parse a slot into an InstantTime object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 36,
"start": 28
},
"rawValue": "tomorrow",
"slotName": "weatherForecastStartDatetime",
"value": {
"grain": "Day",
"kind": "InstantTime",
"precision": "Exact",
"value": "2017-09-15 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed InstantTime object, or None.
"""
date = IntentParser.get_dict_value(slot, ['value', 'value'])
if not date:
return None
date = parse(date)
if not date:
return None
grain = InstantTime.parse_grain(
IntentParser.get_dict_value(slot,
['value', 'grain']))
return InstantTime(date, grain)
@staticmethod
@staticmethod
def get_dict_value(dictionary, path):
""" Safely get the value of a dictionary given a key path. For
instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at
key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at
key path ['a', 'b', 'c'] is None.
:param dictionary: a dictionary.
:param path: the key path.
:return: The value of d at the given key path, or None if the key
path does not exist.
"""
if len(path) == 0:
return None
temp_dictionary = dictionary
try:
for k in path:
temp_dictionary = temp_dictionary[k]
return temp_dictionary
except (KeyError, TypeError):
pass
return None
@staticmethod
def get_session_id(payload):
result = None
if 'sessionID' in payload:
result = payload['sessionID']
return result
@staticmethod
def get_site_id(payload):
result = None
if 'siteID' in payload:
result = payload['siteID']
return result
@staticmethod
def get_custom_data(payload):
result = None
if 'customData' in payload:
result = payload['customData']
return result
|
snipsco/snipsmanagercore
|
snipsmanagercore/intent_parser.py
|
IntentParser.get_dict_value
|
python
|
def get_dict_value(dictionary, path):
if len(path) == 0:
return None
temp_dictionary = dictionary
try:
for k in path:
temp_dictionary = temp_dictionary[k]
return temp_dictionary
except (KeyError, TypeError):
pass
return None
|
Safely get the value of a dictionary given a key path. For
instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at
key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, at
key path ['a', 'b', 'c'] is None.
:param dictionary: a dictionary.
:param path: the key path.
:return: The value of d at the given key path, or None if the key
path does not exist.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L207-L227
| null |
class IntentParser:
""" Helper class for parsing intents. """
@staticmethod
def parse(payload, candidate_classes):
""" Parse a json response into an intent.
:param payload: a JSON object representing an intent.
:param candidate_classes: a list of classes representing various
intents, each having their own `parse`
method to attempt parsing the JSON object
into the given intent class.
:return: An object version of the intent if one of the candidate
classes managed to parse it, or None.
"""
for cls in candidate_classes:
intent = cls.parse(payload)
if intent:
return intent
return None
@staticmethod
def get_intent_name(payload):
""" Return the simple intent name. An intent has the form:
{
"input": "turn the lights green",
"intent": {
"intentName": "user_BJW0GIoCx__Lights",
...
},
"slots": [...]
}
and this function extracts the last part of the intent
name ("Lights"), i.e. removing the user id.
:param payload: the intent, in JSON format.
:return: the simpe intent name.
"""
if 'intent' in payload and 'intentName' in payload['intent']:
# Snips (public) => IntentName
# public => username:IntentName
# private => private:IntentName
# private legacy => userId__IntentName
return payload['intent']['intentName'].split('__')[-1].split(":")[-1]
return None
@staticmethod
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result
@staticmethod
def parse_instant_time(slot):
""" Parse a slot into an InstantTime object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 36,
"start": 28
},
"rawValue": "tomorrow",
"slotName": "weatherForecastStartDatetime",
"value": {
"grain": "Day",
"kind": "InstantTime",
"precision": "Exact",
"value": "2017-09-15 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed InstantTime object, or None.
"""
date = IntentParser.get_dict_value(slot, ['value', 'value'])
if not date:
return None
date = parse(date)
if not date:
return None
grain = InstantTime.parse_grain(
IntentParser.get_dict_value(slot,
['value', 'grain']))
return InstantTime(date, grain)
@staticmethod
def parse_time_interval(slot):
""" Parse a slot into a TimeInterval object.
Sample response:
{
"entity": "snips/datetime",
"range": {
"end": 42,
"start": 13
},
"rawValue": "between tomorrow and saturday",
"slotName": "weatherForecastStartDatetime",
"value": {
"from": "2017-09-15 00:00:00 +00:00",
"kind": "TimeInterval",
"to": "2017-09-17 00:00:00 +00:00"
}
}
:param slot: a intent slot.
:return: a parsed TimeInterval object, or None.
"""
start = IntentParser.get_dict_value(
slot, ['value', 'from'])
end = IntentParser.get_dict_value(slot, ['value', 'to'])
if not start or not end:
return None
start = parse(start)
end = parse(end)
if not start or not end:
return None
return TimeInterval(start, end)
@staticmethod
@staticmethod
def get_session_id(payload):
result = None
if 'sessionID' in payload:
result = payload['sessionID']
return result
@staticmethod
def get_site_id(payload):
result = None
if 'siteID' in payload:
result = payload['siteID']
return result
@staticmethod
def get_custom_data(payload):
result = None
if 'customData' in payload:
result = payload['customData']
return result
|
snipsco/snipsmanagercore
|
snipsmanagercore/tts.py
|
GTTS.speak
|
python
|
def speak(self, sentence):
temp_dir = "/tmp/"
filename = "gtts.mp3"
file_path = "{}/{}".format(temp_dir, filename)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
def delete_file():
try:
os.remove(file_path)
if not os.listdir(temp_dir):
try:
os.rmdir(temp_dir)
except OSError:
pass
except:
pass
if self.logger is not None:
self.logger.info("Google TTS: {}".format(sentence))
tts = gTTS(text=sentence, lang=self.locale)
tts.save(file_path)
AudioPlayer.play_async(file_path, delete_file)
|
Speak a sentence using Google TTS.
:param sentence: the sentence to speak.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/tts.py#L23-L49
|
[
"def play_async(cls, file_path, on_done=None):\n \"\"\" Play an audio file asynchronously.\n\n :param file_path: the path to the file to play.\n :param on_done: callback when audio playback completes.\n \"\"\"\n thread = threading.Thread(\n target=AudioPlayer.play, args=(file_path, on_done,))\n thread.start()\n"
] |
class GTTS:
""" Google TTS service. """
def __init__(self, locale, logger=None):
""" Initialise the service.
:param locale: the language locale, e.g. "fr" or "en_US".
"""
self.logger = logger
self.locale = locale.split("_")[0]
|
snipsco/snipsmanagercore
|
snipsmanagercore/instant_time.py
|
InstantTime.parse_grain
|
python
|
def parse_grain(grain):
if not grain:
return InstantTime.day
if grain.lower() == 'week':
return InstantTime.week
return InstantTime.day
|
Parse a string to a granularity, e.g. "Day" to InstantTime.day.
:param grain: a string representing a granularity.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/instant_time.py#L23-L32
| null |
class InstantTime:
""" A representation of a datetime with a given granularity (day, week).
"""
day, week = range(2)
def __init__(self, datetime, granularity=None):
""" Initialisation.
:param datetime: the underlying datetime object
:param granularity: granularity of the datetime, either
InstantTime.day or InstantTime.week.
"""
self.datetime = datetime
self.granularity = granularity or InstantTime.day
@staticmethod
|
snipsco/snipsmanagercore
|
snipsmanagercore/sound_service.py
|
SoundService.play
|
python
|
def play(state):
filename = None
if state == SoundService.State.welcome:
filename = "pad_glow_welcome1.wav"
elif state == SoundService.State.goodbye:
filename = "pad_glow_power_off.wav"
elif state == SoundService.State.hotword_detected:
filename = "pad_soft_on.wav"
elif state == SoundService.State.asr_text_captured:
filename = "pad_soft_off.wav"
elif state == SoundService.State.error:
filename = "music_marimba_error_chord_2x.wav"
if filename is not None:
AudioPlayer.play_async("{}/{}".format(ABS_SOUND_DIR, filename))
|
Play sound for a given state.
:param state: a State value.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/sound_service.py#L23-L41
|
[
"def play_async(cls, file_path, on_done=None):\n \"\"\" Play an audio file asynchronously.\n\n :param file_path: the path to the file to play.\n :param on_done: callback when audio playback completes.\n \"\"\"\n thread = threading.Thread(\n target=AudioPlayer.play, args=(file_path, on_done,))\n thread.start()\n"
] |
class SoundService:
""" Sound service for playing various state sounds. """
class State:
""" States handled by the sound service. """
none, welcome, goodbye, hotword_detected, asr_text_captured, error = range(
6)
@staticmethod
|
snipsco/snipsmanagercore
|
snipsmanagercore/thread_handler.py
|
ThreadHandler.run
|
python
|
def run(self, target, args=()):
run_event = threading.Event()
run_event.set()
thread = threading.Thread(target=target, args=args + (run_event, ))
self.thread_pool.append(thread)
self.run_events.append(run_event)
thread.start()
|
Run a function in a separate thread.
:param target: the function to run.
:param args: the parameters to pass to the function.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/thread_handler.py#L19-L30
| null |
class ThreadHandler(Singleton):
""" Thread handler. """
def __init__(self):
""" Initialisation. """
self.thread_pool = []
self.run_events = []
def start_run_loop(self):
""" Start the thread handler, ensuring that everything stops property
when sending a keyboard interrup.
"""
try:
while 1:
time.sleep(.1)
except KeyboardInterrupt:
self.stop()
def stop(self):
""" Stop all functions running in the thread handler."""
for run_event in self.run_events:
run_event.clear()
for thread in self.thread_pool:
thread.join()
|
snipsco/snipsmanagercore
|
snipsmanagercore/thread_handler.py
|
ThreadHandler.stop
|
python
|
def stop(self):
for run_event in self.run_events:
run_event.clear()
for thread in self.thread_pool:
thread.join()
|
Stop all functions running in the thread handler.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/thread_handler.py#L42-L48
| null |
class ThreadHandler(Singleton):
""" Thread handler. """
def __init__(self):
""" Initialisation. """
self.thread_pool = []
self.run_events = []
def run(self, target, args=()):
""" Run a function in a separate thread.
:param target: the function to run.
:param args: the parameters to pass to the function.
"""
run_event = threading.Event()
run_event.set()
thread = threading.Thread(target=target, args=args + (run_event, ))
self.thread_pool.append(thread)
self.run_events.append(run_event)
thread.start()
def start_run_loop(self):
""" Start the thread handler, ensuring that everything stops property
when sending a keyboard interrup.
"""
try:
while 1:
time.sleep(.1)
except KeyboardInterrupt:
self.stop()
|
snipsco/snipsmanagercore
|
snipsmanagercore/audio_player.py
|
AudioPlayer.play
|
python
|
def play(cls, file_path, on_done=None, logger=None):
pygame.mixer.init()
try:
pygame.mixer.music.load(file_path)
except pygame.error as e:
if logger is not None:
logger.warning(str(e))
return
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
time.sleep(0.1)
continue
if on_done:
on_done()
|
Play an audio file.
:param file_path: the path to the file to play.
:param on_done: callback when audio playback completes.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/audio_player.py#L12-L31
| null |
class AudioPlayer:
""" A simple audio player based on pygame. """
@classmethod
@classmethod
def play_async(cls, file_path, on_done=None):
""" Play an audio file asynchronously.
:param file_path: the path to the file to play.
:param on_done: callback when audio playback completes.
"""
thread = threading.Thread(
target=AudioPlayer.play, args=(file_path, on_done,))
thread.start()
@classmethod
def stop(cls):
""" Stop the audio. """
pygame.mixer.init()
pygame.mixer.music.stop()
@classmethod
def pause(cls):
""" Pause the audio. """
pygame.mixer.init()
pygame.mixer.music.pause()
@classmethod
def resume(cls):
""" Resume the audio. """
pygame.mixer.init()
pygame.mixer.music.unpause()
|
snipsco/snipsmanagercore
|
snipsmanagercore/audio_player.py
|
AudioPlayer.play_async
|
python
|
def play_async(cls, file_path, on_done=None):
thread = threading.Thread(
target=AudioPlayer.play, args=(file_path, on_done,))
thread.start()
|
Play an audio file asynchronously.
:param file_path: the path to the file to play.
:param on_done: callback when audio playback completes.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/audio_player.py#L34-L42
| null |
class AudioPlayer:
""" A simple audio player based on pygame. """
@classmethod
def play(cls, file_path, on_done=None, logger=None):
""" Play an audio file.
:param file_path: the path to the file to play.
:param on_done: callback when audio playback completes.
"""
pygame.mixer.init()
try:
pygame.mixer.music.load(file_path)
except pygame.error as e:
if logger is not None:
logger.warning(str(e))
return
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
time.sleep(0.1)
continue
if on_done:
on_done()
@classmethod
@classmethod
def stop(cls):
""" Stop the audio. """
pygame.mixer.init()
pygame.mixer.music.stop()
@classmethod
def pause(cls):
""" Pause the audio. """
pygame.mixer.init()
pygame.mixer.music.pause()
@classmethod
def resume(cls):
""" Resume the audio. """
pygame.mixer.init()
pygame.mixer.music.unpause()
|
snipsco/snipsmanagercore
|
snipsmanagercore/server.py
|
Server.start
|
python
|
def start(self):
self.thread_handler.run(target=self.start_blocking)
self.thread_handler.start_run_loop()
|
Start the MQTT client.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L72-L75
| null |
class Server():
""" Snips core server. """
DIALOGUE_EVENT_STARTED, DIALOGUE_EVENT_ENDED, DIALOGUE_EVENT_QUEUED = range(3)
def __init__(self,
mqtt_hostname,
mqtt_port,
tts_service_id,
locale,
registry,
handle_intent,
handlers_dialogue_events=None,
handle_start_listening=None,
handle_done_listening=None,
logger=None):
""" Initialisation.
:param config: a YAML configuration.
:param assistant: the client assistant class, holding the
intent handler and intents registry.
"""
self.logger = logger
self.registry = registry
self.handle_intent = handle_intent
self.handlers_dialogue_events = handlers_dialogue_events
self.handle_start_listening = handle_start_listening
self.handle_done_listening = handle_done_listening
self.thread_handler = ThreadHandler()
self.state_handler = StateHandler(self.thread_handler)
self.client = mqtt.Client()
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_message = self.on_message
self.mqtt_hostname = mqtt_hostname
self.mqtt_port = mqtt_port
self.tts_service_id = tts_service_id
self.locale = locale
self.dialogue = SnipsDialogueAPI(self.client, tts_service_id, locale)
self.first_hotword_detected = False
def start_blocking(self, run_event):
""" Start the MQTT client, as a blocking method.
:param run_event: a run event object provided by the thread handler.
"""
topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0),
("snipsmanager/#", 0)]
self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port)))
retry = 0
while True and run_event.is_set():
try:
self.log_info("Trying to connect to {}".format(self.mqtt_hostname))
self.client.connect(self.mqtt_hostname, self.mqtt_port, 60)
break
except (socket_error, Exception) as e:
self.log_info("MQTT error {}".format(e))
time.sleep(5 + int(retry / 5))
retry = retry + 1
topics = [
(MQTT_TOPIC_INTENT + '#', 0),
(MQTT_TOPIC_HOTWORD + '#', 0),
(MQTT_TOPIC_ASR + '#', 0),
(MQTT_TOPIC_SNIPSFILE, 0),
(MQTT_TOPIC_DIALOG_MANAGER + '#', 0),
("snipsmanager/#", 0)
]
self.client.subscribe(topics)
while run_event.is_set():
try:
self.client.loop()
except AttributeError as e:
self.log_info("Error in mqtt run loop {}".format(e))
time.sleep(1)
# pylint: disable=unused-argument,no-self-use
def on_connect(self, client, userdata, flags, result_code):
""" Callback when the MQTT client is connected.
:param client: the client being connected.
:param userdata: unused.
:param flags: unused.
:param result_code: result code.
"""
self.log_info("Connected with result code {}".format(result_code))
self.state_handler.set_state(State.welcome)
# pylint: disable=unused-argument
def on_disconnect(self, client, userdata, result_code):
""" Callback when the MQTT client is disconnected. In this case,
the server waits five seconds before trying to reconnected.
:param client: the client being disconnected.
:param userdata: unused.
:param result_code: result code.
"""
self.log_info("Disconnected with result code " + str(result_code))
self.state_handler.set_state(State.goodbye)
time.sleep(5)
self.thread_handler.run(target=self.start_blocking)
# pylint: disable=unused-argument
def on_message(self, client, userdata, msg):
""" Callback when the MQTT client received a new message.
:param client: the MQTT client.
:param userdata: unused.
:param msg: the MQTT message.
"""
if msg is None:
return
self.log_info("New message on topic {}".format(msg.topic))
self.log_debug("Payload {}".format(msg.payload))
if msg.payload is None or len(msg.payload) == 0:
pass
if msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
site_id = payload.get('siteId')
session_id = payload.get('sessionId')
if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
intent = IntentParser.parse(payload, self.registry.intent_classes)
self.log_debug("Parsed intent: {}".format(intent))
if self.handle_intent is not None:
if intent is not None:
self.log_debug("New intent: {}".format(str(intent.intentName)))
self.handle_intent(intent, payload)
elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn":
self.state_handler.set_state(State.hotword_toggle_on)
elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic):
if not self.first_hotword_detected:
self.client.publish(
"hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False)
self.first_hotword_detected = True
self.state_handler.set_state(State.hotword_detected)
if self.handle_start_listening is not None:
self.handle_start_listening()
elif msg.topic == MQTT_TOPIC_ASR + "startListening":
self.state_handler.set_state(State.asr_start_listening)
elif msg.topic == MQTT_TOPIC_ASR + "textCaptured":
self.state_handler.set_state(State.asr_text_captured)
if msg.payload is not None:
self.log_debug("Text captured: {}".format(str(msg.payload)))
if self.handle_done_listening is not None:
self.handle_done_listening()
payload = json.loads(msg.payload.decode('utf-8'))
if payload['text'] == '':
self.handle_intent(None, None)
elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized":
self.handle_intent(None, None)
elif msg.topic == "snipsmanager/setSnipsfile" and msg.payload:
self.state_handler.set_state(State.asr_text_captured)
elif msg.topic == MQTT_TOPIC_SESSION_STARTED:
self.state_handler.set_state(State.session_started)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_ENDED:
self.state_handler.set_state(State.session_ended)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_QUEUED:
self.state_handler.set_state(State.session_queued)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
def log_info(self, message):
if self.logger is not None:
self.logger.info(message)
def log_debug(self, message):
if self.logger is not None:
self.logger.debug(message)
def log_error(self, message):
if self.logger is not None:
self.logger.error(message)
|
snipsco/snipsmanagercore
|
snipsmanagercore/server.py
|
Server.start_blocking
|
python
|
def start_blocking(self, run_event):
topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0),
("snipsmanager/#", 0)]
self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port)))
retry = 0
while True and run_event.is_set():
try:
self.log_info("Trying to connect to {}".format(self.mqtt_hostname))
self.client.connect(self.mqtt_hostname, self.mqtt_port, 60)
break
except (socket_error, Exception) as e:
self.log_info("MQTT error {}".format(e))
time.sleep(5 + int(retry / 5))
retry = retry + 1
topics = [
(MQTT_TOPIC_INTENT + '#', 0),
(MQTT_TOPIC_HOTWORD + '#', 0),
(MQTT_TOPIC_ASR + '#', 0),
(MQTT_TOPIC_SNIPSFILE, 0),
(MQTT_TOPIC_DIALOG_MANAGER + '#', 0),
("snipsmanager/#", 0)
]
self.client.subscribe(topics)
while run_event.is_set():
try:
self.client.loop()
except AttributeError as e:
self.log_info("Error in mqtt run loop {}".format(e))
time.sleep(1)
|
Start the MQTT client, as a blocking method.
:param run_event: a run event object provided by the thread handler.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L77-L113
|
[
"def log_info(self, message):\n if self.logger is not None:\n self.logger.info(message)\n"
] |
class Server():
""" Snips core server. """
DIALOGUE_EVENT_STARTED, DIALOGUE_EVENT_ENDED, DIALOGUE_EVENT_QUEUED = range(3)
def __init__(self,
mqtt_hostname,
mqtt_port,
tts_service_id,
locale,
registry,
handle_intent,
handlers_dialogue_events=None,
handle_start_listening=None,
handle_done_listening=None,
logger=None):
""" Initialisation.
:param config: a YAML configuration.
:param assistant: the client assistant class, holding the
intent handler and intents registry.
"""
self.logger = logger
self.registry = registry
self.handle_intent = handle_intent
self.handlers_dialogue_events = handlers_dialogue_events
self.handle_start_listening = handle_start_listening
self.handle_done_listening = handle_done_listening
self.thread_handler = ThreadHandler()
self.state_handler = StateHandler(self.thread_handler)
self.client = mqtt.Client()
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_message = self.on_message
self.mqtt_hostname = mqtt_hostname
self.mqtt_port = mqtt_port
self.tts_service_id = tts_service_id
self.locale = locale
self.dialogue = SnipsDialogueAPI(self.client, tts_service_id, locale)
self.first_hotword_detected = False
def start(self):
""" Start the MQTT client. """
self.thread_handler.run(target=self.start_blocking)
self.thread_handler.start_run_loop()
# pylint: disable=unused-argument,no-self-use
def on_connect(self, client, userdata, flags, result_code):
""" Callback when the MQTT client is connected.
:param client: the client being connected.
:param userdata: unused.
:param flags: unused.
:param result_code: result code.
"""
self.log_info("Connected with result code {}".format(result_code))
self.state_handler.set_state(State.welcome)
# pylint: disable=unused-argument
def on_disconnect(self, client, userdata, result_code):
""" Callback when the MQTT client is disconnected. In this case,
the server waits five seconds before trying to reconnected.
:param client: the client being disconnected.
:param userdata: unused.
:param result_code: result code.
"""
self.log_info("Disconnected with result code " + str(result_code))
self.state_handler.set_state(State.goodbye)
time.sleep(5)
self.thread_handler.run(target=self.start_blocking)
# pylint: disable=unused-argument
def on_message(self, client, userdata, msg):
""" Callback when the MQTT client received a new message.
:param client: the MQTT client.
:param userdata: unused.
:param msg: the MQTT message.
"""
if msg is None:
return
self.log_info("New message on topic {}".format(msg.topic))
self.log_debug("Payload {}".format(msg.payload))
if msg.payload is None or len(msg.payload) == 0:
pass
if msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
site_id = payload.get('siteId')
session_id = payload.get('sessionId')
if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
intent = IntentParser.parse(payload, self.registry.intent_classes)
self.log_debug("Parsed intent: {}".format(intent))
if self.handle_intent is not None:
if intent is not None:
self.log_debug("New intent: {}".format(str(intent.intentName)))
self.handle_intent(intent, payload)
elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn":
self.state_handler.set_state(State.hotword_toggle_on)
elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic):
if not self.first_hotword_detected:
self.client.publish(
"hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False)
self.first_hotword_detected = True
self.state_handler.set_state(State.hotword_detected)
if self.handle_start_listening is not None:
self.handle_start_listening()
elif msg.topic == MQTT_TOPIC_ASR + "startListening":
self.state_handler.set_state(State.asr_start_listening)
elif msg.topic == MQTT_TOPIC_ASR + "textCaptured":
self.state_handler.set_state(State.asr_text_captured)
if msg.payload is not None:
self.log_debug("Text captured: {}".format(str(msg.payload)))
if self.handle_done_listening is not None:
self.handle_done_listening()
payload = json.loads(msg.payload.decode('utf-8'))
if payload['text'] == '':
self.handle_intent(None, None)
elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized":
self.handle_intent(None, None)
elif msg.topic == "snipsmanager/setSnipsfile" and msg.payload:
self.state_handler.set_state(State.asr_text_captured)
elif msg.topic == MQTT_TOPIC_SESSION_STARTED:
self.state_handler.set_state(State.session_started)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_ENDED:
self.state_handler.set_state(State.session_ended)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_QUEUED:
self.state_handler.set_state(State.session_queued)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
def log_info(self, message):
if self.logger is not None:
self.logger.info(message)
def log_debug(self, message):
if self.logger is not None:
self.logger.debug(message)
def log_error(self, message):
if self.logger is not None:
self.logger.error(message)
|
snipsco/snipsmanagercore
|
snipsmanagercore/server.py
|
Server.on_connect
|
python
|
def on_connect(self, client, userdata, flags, result_code):
self.log_info("Connected with result code {}".format(result_code))
self.state_handler.set_state(State.welcome)
|
Callback when the MQTT client is connected.
:param client: the client being connected.
:param userdata: unused.
:param flags: unused.
:param result_code: result code.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L116-L125
|
[
"def log_info(self, message):\n if self.logger is not None:\n self.logger.info(message)\n"
] |
class Server():
""" Snips core server. """
DIALOGUE_EVENT_STARTED, DIALOGUE_EVENT_ENDED, DIALOGUE_EVENT_QUEUED = range(3)
def __init__(self,
mqtt_hostname,
mqtt_port,
tts_service_id,
locale,
registry,
handle_intent,
handlers_dialogue_events=None,
handle_start_listening=None,
handle_done_listening=None,
logger=None):
""" Initialisation.
:param config: a YAML configuration.
:param assistant: the client assistant class, holding the
intent handler and intents registry.
"""
self.logger = logger
self.registry = registry
self.handle_intent = handle_intent
self.handlers_dialogue_events = handlers_dialogue_events
self.handle_start_listening = handle_start_listening
self.handle_done_listening = handle_done_listening
self.thread_handler = ThreadHandler()
self.state_handler = StateHandler(self.thread_handler)
self.client = mqtt.Client()
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_message = self.on_message
self.mqtt_hostname = mqtt_hostname
self.mqtt_port = mqtt_port
self.tts_service_id = tts_service_id
self.locale = locale
self.dialogue = SnipsDialogueAPI(self.client, tts_service_id, locale)
self.first_hotword_detected = False
def start(self):
""" Start the MQTT client. """
self.thread_handler.run(target=self.start_blocking)
self.thread_handler.start_run_loop()
def start_blocking(self, run_event):
""" Start the MQTT client, as a blocking method.
:param run_event: a run event object provided by the thread handler.
"""
topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0),
("snipsmanager/#", 0)]
self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port)))
retry = 0
while True and run_event.is_set():
try:
self.log_info("Trying to connect to {}".format(self.mqtt_hostname))
self.client.connect(self.mqtt_hostname, self.mqtt_port, 60)
break
except (socket_error, Exception) as e:
self.log_info("MQTT error {}".format(e))
time.sleep(5 + int(retry / 5))
retry = retry + 1
topics = [
(MQTT_TOPIC_INTENT + '#', 0),
(MQTT_TOPIC_HOTWORD + '#', 0),
(MQTT_TOPIC_ASR + '#', 0),
(MQTT_TOPIC_SNIPSFILE, 0),
(MQTT_TOPIC_DIALOG_MANAGER + '#', 0),
("snipsmanager/#", 0)
]
self.client.subscribe(topics)
while run_event.is_set():
try:
self.client.loop()
except AttributeError as e:
self.log_info("Error in mqtt run loop {}".format(e))
time.sleep(1)
# pylint: disable=unused-argument,no-self-use
# pylint: disable=unused-argument
def on_disconnect(self, client, userdata, result_code):
""" Callback when the MQTT client is disconnected. In this case,
the server waits five seconds before trying to reconnected.
:param client: the client being disconnected.
:param userdata: unused.
:param result_code: result code.
"""
self.log_info("Disconnected with result code " + str(result_code))
self.state_handler.set_state(State.goodbye)
time.sleep(5)
self.thread_handler.run(target=self.start_blocking)
# pylint: disable=unused-argument
def on_message(self, client, userdata, msg):
""" Callback when the MQTT client received a new message.
:param client: the MQTT client.
:param userdata: unused.
:param msg: the MQTT message.
"""
if msg is None:
return
self.log_info("New message on topic {}".format(msg.topic))
self.log_debug("Payload {}".format(msg.payload))
if msg.payload is None or len(msg.payload) == 0:
pass
if msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
site_id = payload.get('siteId')
session_id = payload.get('sessionId')
if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
intent = IntentParser.parse(payload, self.registry.intent_classes)
self.log_debug("Parsed intent: {}".format(intent))
if self.handle_intent is not None:
if intent is not None:
self.log_debug("New intent: {}".format(str(intent.intentName)))
self.handle_intent(intent, payload)
elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn":
self.state_handler.set_state(State.hotword_toggle_on)
elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic):
if not self.first_hotword_detected:
self.client.publish(
"hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False)
self.first_hotword_detected = True
self.state_handler.set_state(State.hotword_detected)
if self.handle_start_listening is not None:
self.handle_start_listening()
elif msg.topic == MQTT_TOPIC_ASR + "startListening":
self.state_handler.set_state(State.asr_start_listening)
elif msg.topic == MQTT_TOPIC_ASR + "textCaptured":
self.state_handler.set_state(State.asr_text_captured)
if msg.payload is not None:
self.log_debug("Text captured: {}".format(str(msg.payload)))
if self.handle_done_listening is not None:
self.handle_done_listening()
payload = json.loads(msg.payload.decode('utf-8'))
if payload['text'] == '':
self.handle_intent(None, None)
elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized":
self.handle_intent(None, None)
elif msg.topic == "snipsmanager/setSnipsfile" and msg.payload:
self.state_handler.set_state(State.asr_text_captured)
elif msg.topic == MQTT_TOPIC_SESSION_STARTED:
self.state_handler.set_state(State.session_started)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_ENDED:
self.state_handler.set_state(State.session_ended)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_QUEUED:
self.state_handler.set_state(State.session_queued)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
def log_info(self, message):
if self.logger is not None:
self.logger.info(message)
def log_debug(self, message):
if self.logger is not None:
self.logger.debug(message)
def log_error(self, message):
if self.logger is not None:
self.logger.error(message)
|
snipsco/snipsmanagercore
|
snipsmanagercore/server.py
|
Server.on_disconnect
|
python
|
def on_disconnect(self, client, userdata, result_code):
self.log_info("Disconnected with result code " + str(result_code))
self.state_handler.set_state(State.goodbye)
time.sleep(5)
self.thread_handler.run(target=self.start_blocking)
|
Callback when the MQTT client is disconnected. In this case,
the server waits five seconds before trying to reconnected.
:param client: the client being disconnected.
:param userdata: unused.
:param result_code: result code.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L128-L139
|
[
"def log_info(self, message):\n if self.logger is not None:\n self.logger.info(message)\n"
] |
class Server():
""" Snips core server. """
DIALOGUE_EVENT_STARTED, DIALOGUE_EVENT_ENDED, DIALOGUE_EVENT_QUEUED = range(3)
def __init__(self,
mqtt_hostname,
mqtt_port,
tts_service_id,
locale,
registry,
handle_intent,
handlers_dialogue_events=None,
handle_start_listening=None,
handle_done_listening=None,
logger=None):
""" Initialisation.
:param config: a YAML configuration.
:param assistant: the client assistant class, holding the
intent handler and intents registry.
"""
self.logger = logger
self.registry = registry
self.handle_intent = handle_intent
self.handlers_dialogue_events = handlers_dialogue_events
self.handle_start_listening = handle_start_listening
self.handle_done_listening = handle_done_listening
self.thread_handler = ThreadHandler()
self.state_handler = StateHandler(self.thread_handler)
self.client = mqtt.Client()
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_message = self.on_message
self.mqtt_hostname = mqtt_hostname
self.mqtt_port = mqtt_port
self.tts_service_id = tts_service_id
self.locale = locale
self.dialogue = SnipsDialogueAPI(self.client, tts_service_id, locale)
self.first_hotword_detected = False
def start(self):
""" Start the MQTT client. """
self.thread_handler.run(target=self.start_blocking)
self.thread_handler.start_run_loop()
def start_blocking(self, run_event):
""" Start the MQTT client, as a blocking method.
:param run_event: a run event object provided by the thread handler.
"""
topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0),
("snipsmanager/#", 0)]
self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port)))
retry = 0
while True and run_event.is_set():
try:
self.log_info("Trying to connect to {}".format(self.mqtt_hostname))
self.client.connect(self.mqtt_hostname, self.mqtt_port, 60)
break
except (socket_error, Exception) as e:
self.log_info("MQTT error {}".format(e))
time.sleep(5 + int(retry / 5))
retry = retry + 1
topics = [
(MQTT_TOPIC_INTENT + '#', 0),
(MQTT_TOPIC_HOTWORD + '#', 0),
(MQTT_TOPIC_ASR + '#', 0),
(MQTT_TOPIC_SNIPSFILE, 0),
(MQTT_TOPIC_DIALOG_MANAGER + '#', 0),
("snipsmanager/#", 0)
]
self.client.subscribe(topics)
while run_event.is_set():
try:
self.client.loop()
except AttributeError as e:
self.log_info("Error in mqtt run loop {}".format(e))
time.sleep(1)
# pylint: disable=unused-argument,no-self-use
def on_connect(self, client, userdata, flags, result_code):
""" Callback when the MQTT client is connected.
:param client: the client being connected.
:param userdata: unused.
:param flags: unused.
:param result_code: result code.
"""
self.log_info("Connected with result code {}".format(result_code))
self.state_handler.set_state(State.welcome)
# pylint: disable=unused-argument
# pylint: disable=unused-argument
def on_message(self, client, userdata, msg):
""" Callback when the MQTT client received a new message.
:param client: the MQTT client.
:param userdata: unused.
:param msg: the MQTT message.
"""
if msg is None:
return
self.log_info("New message on topic {}".format(msg.topic))
self.log_debug("Payload {}".format(msg.payload))
if msg.payload is None or len(msg.payload) == 0:
pass
if msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
site_id = payload.get('siteId')
session_id = payload.get('sessionId')
if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
intent = IntentParser.parse(payload, self.registry.intent_classes)
self.log_debug("Parsed intent: {}".format(intent))
if self.handle_intent is not None:
if intent is not None:
self.log_debug("New intent: {}".format(str(intent.intentName)))
self.handle_intent(intent, payload)
elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn":
self.state_handler.set_state(State.hotword_toggle_on)
elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic):
if not self.first_hotword_detected:
self.client.publish(
"hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False)
self.first_hotword_detected = True
self.state_handler.set_state(State.hotword_detected)
if self.handle_start_listening is not None:
self.handle_start_listening()
elif msg.topic == MQTT_TOPIC_ASR + "startListening":
self.state_handler.set_state(State.asr_start_listening)
elif msg.topic == MQTT_TOPIC_ASR + "textCaptured":
self.state_handler.set_state(State.asr_text_captured)
if msg.payload is not None:
self.log_debug("Text captured: {}".format(str(msg.payload)))
if self.handle_done_listening is not None:
self.handle_done_listening()
payload = json.loads(msg.payload.decode('utf-8'))
if payload['text'] == '':
self.handle_intent(None, None)
elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized":
self.handle_intent(None, None)
elif msg.topic == "snipsmanager/setSnipsfile" and msg.payload:
self.state_handler.set_state(State.asr_text_captured)
elif msg.topic == MQTT_TOPIC_SESSION_STARTED:
self.state_handler.set_state(State.session_started)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_ENDED:
self.state_handler.set_state(State.session_ended)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_QUEUED:
self.state_handler.set_state(State.session_queued)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
def log_info(self, message):
if self.logger is not None:
self.logger.info(message)
def log_debug(self, message):
if self.logger is not None:
self.logger.debug(message)
def log_error(self, message):
if self.logger is not None:
self.logger.error(message)
|
snipsco/snipsmanagercore
|
snipsmanagercore/server.py
|
Server.on_message
|
python
|
def on_message(self, client, userdata, msg):
if msg is None:
return
self.log_info("New message on topic {}".format(msg.topic))
self.log_debug("Payload {}".format(msg.payload))
if msg.payload is None or len(msg.payload) == 0:
pass
if msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
site_id = payload.get('siteId')
session_id = payload.get('sessionId')
if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload:
payload = json.loads(msg.payload.decode('utf-8'))
intent = IntentParser.parse(payload, self.registry.intent_classes)
self.log_debug("Parsed intent: {}".format(intent))
if self.handle_intent is not None:
if intent is not None:
self.log_debug("New intent: {}".format(str(intent.intentName)))
self.handle_intent(intent, payload)
elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn":
self.state_handler.set_state(State.hotword_toggle_on)
elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic):
if not self.first_hotword_detected:
self.client.publish(
"hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False)
self.first_hotword_detected = True
self.state_handler.set_state(State.hotword_detected)
if self.handle_start_listening is not None:
self.handle_start_listening()
elif msg.topic == MQTT_TOPIC_ASR + "startListening":
self.state_handler.set_state(State.asr_start_listening)
elif msg.topic == MQTT_TOPIC_ASR + "textCaptured":
self.state_handler.set_state(State.asr_text_captured)
if msg.payload is not None:
self.log_debug("Text captured: {}".format(str(msg.payload)))
if self.handle_done_listening is not None:
self.handle_done_listening()
payload = json.loads(msg.payload.decode('utf-8'))
if payload['text'] == '':
self.handle_intent(None, None)
elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized":
self.handle_intent(None, None)
elif msg.topic == "snipsmanager/setSnipsfile" and msg.payload:
self.state_handler.set_state(State.asr_text_captured)
elif msg.topic == MQTT_TOPIC_SESSION_STARTED:
self.state_handler.set_state(State.session_started)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_ENDED:
self.state_handler.set_state(State.session_ended)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id)
elif msg.topic == MQTT_TOPIC_SESSION_QUEUED:
self.state_handler.set_state(State.session_queued)
if self.handlers_dialogue_events is not None:
self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
|
Callback when the MQTT client received a new message.
:param client: the MQTT client.
:param userdata: unused.
:param msg: the MQTT message.
|
train
|
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/server.py#L142-L207
|
[
"def log_info(self, message):\n if self.logger is not None:\n self.logger.info(message)\n",
"def log_debug(self, message):\n if self.logger is not None:\n self.logger.debug(message)\n"
] |
class Server():
""" Snips core server. """
DIALOGUE_EVENT_STARTED, DIALOGUE_EVENT_ENDED, DIALOGUE_EVENT_QUEUED = range(3)
def __init__(self,
mqtt_hostname,
mqtt_port,
tts_service_id,
locale,
registry,
handle_intent,
handlers_dialogue_events=None,
handle_start_listening=None,
handle_done_listening=None,
logger=None):
""" Initialisation.
:param config: a YAML configuration.
:param assistant: the client assistant class, holding the
intent handler and intents registry.
"""
self.logger = logger
self.registry = registry
self.handle_intent = handle_intent
self.handlers_dialogue_events = handlers_dialogue_events
self.handle_start_listening = handle_start_listening
self.handle_done_listening = handle_done_listening
self.thread_handler = ThreadHandler()
self.state_handler = StateHandler(self.thread_handler)
self.client = mqtt.Client()
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_message = self.on_message
self.mqtt_hostname = mqtt_hostname
self.mqtt_port = mqtt_port
self.tts_service_id = tts_service_id
self.locale = locale
self.dialogue = SnipsDialogueAPI(self.client, tts_service_id, locale)
self.first_hotword_detected = False
def start(self):
""" Start the MQTT client. """
self.thread_handler.run(target=self.start_blocking)
self.thread_handler.start_run_loop()
def start_blocking(self, run_event):
""" Start the MQTT client, as a blocking method.
:param run_event: a run event object provided by the thread handler.
"""
topics = [("hermes/intent/#", 0), ("hermes/hotword/#", 0), ("hermes/asr/#", 0), ("hermes/nlu/#", 0),
("snipsmanager/#", 0)]
self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname, str(self.mqtt_port)))
retry = 0
while True and run_event.is_set():
try:
self.log_info("Trying to connect to {}".format(self.mqtt_hostname))
self.client.connect(self.mqtt_hostname, self.mqtt_port, 60)
break
except (socket_error, Exception) as e:
self.log_info("MQTT error {}".format(e))
time.sleep(5 + int(retry / 5))
retry = retry + 1
topics = [
(MQTT_TOPIC_INTENT + '#', 0),
(MQTT_TOPIC_HOTWORD + '#', 0),
(MQTT_TOPIC_ASR + '#', 0),
(MQTT_TOPIC_SNIPSFILE, 0),
(MQTT_TOPIC_DIALOG_MANAGER + '#', 0),
("snipsmanager/#", 0)
]
self.client.subscribe(topics)
while run_event.is_set():
try:
self.client.loop()
except AttributeError as e:
self.log_info("Error in mqtt run loop {}".format(e))
time.sleep(1)
# pylint: disable=unused-argument,no-self-use
def on_connect(self, client, userdata, flags, result_code):
""" Callback when the MQTT client is connected.
:param client: the client being connected.
:param userdata: unused.
:param flags: unused.
:param result_code: result code.
"""
self.log_info("Connected with result code {}".format(result_code))
self.state_handler.set_state(State.welcome)
# pylint: disable=unused-argument
def on_disconnect(self, client, userdata, result_code):
""" Callback when the MQTT client is disconnected. In this case,
the server waits five seconds before trying to reconnected.
:param client: the client being disconnected.
:param userdata: unused.
:param result_code: result code.
"""
self.log_info("Disconnected with result code " + str(result_code))
self.state_handler.set_state(State.goodbye)
time.sleep(5)
self.thread_handler.run(target=self.start_blocking)
# pylint: disable=unused-argument
def log_info(self, message):
if self.logger is not None:
self.logger.info(message)
def log_debug(self, message):
if self.logger is not None:
self.logger.debug(message)
def log_error(self, message):
if self.logger is not None:
self.logger.error(message)
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
ProcessList.put
|
python
|
def put(self, stream, cmd):
if len(self.q) < self.max_size:
if stream['id'] in self.q:
raise QueueDuplicate
p = self.call(stream, cmd)
self.q[stream['id']] = p
else:
raise QueueFull
|
Spawn a new background process
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L63-L72
| null |
class ProcessList(object):
""" Small class to store and handle calls to a given callable """
def __init__(self, f, max_size=10):
""" Create a ProcessList
f : callable for which a process will be spawned for each call to put
max_size : the maximum size of the ProcessList
"""
self.q = {}
self.max_size = max_size
self.call = f
def __del__(self):
self.terminate()
def full(self):
""" Check is the List is full, returns a bool """
return len(self.q) == self.max_size
def empty(self):
""" Check is the List is full, returns a bool """
return len(self.q) == 0
def get_finished(self):
""" Clean up terminated processes and returns the list of their ids """
indices = []
for idf, v in self.q.items():
if v.poll() != None:
indices.append(idf)
for i in indices:
self.q.pop(i)
return indices
def get_process(self, idf):
""" Get a process by id, returns None if there is no match """
return self.q.get(idf)
def get_stdouts(self):
""" Get the list of stdout of each process """
souts = []
for v in self.q.values():
souts.append(v.stdout)
return souts
def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None
def terminate(self):
""" Terminate all processes """
for w in self.q.values():
try:
w.terminate()
except:
pass
self.q = {}
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
ProcessList.get_finished
|
python
|
def get_finished(self):
indices = []
for idf, v in self.q.items():
if v.poll() != None:
indices.append(idf)
for i in indices:
self.q.pop(i)
return indices
|
Clean up terminated processes and returns the list of their ids
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L74-L83
| null |
class ProcessList(object):
""" Small class to store and handle calls to a given callable """
def __init__(self, f, max_size=10):
""" Create a ProcessList
f : callable for which a process will be spawned for each call to put
max_size : the maximum size of the ProcessList
"""
self.q = {}
self.max_size = max_size
self.call = f
def __del__(self):
self.terminate()
def full(self):
""" Check is the List is full, returns a bool """
return len(self.q) == self.max_size
def empty(self):
""" Check is the List is full, returns a bool """
return len(self.q) == 0
def put(self, stream, cmd):
""" Spawn a new background process """
if len(self.q) < self.max_size:
if stream['id'] in self.q:
raise QueueDuplicate
p = self.call(stream, cmd)
self.q[stream['id']] = p
else:
raise QueueFull
def get_process(self, idf):
""" Get a process by id, returns None if there is no match """
return self.q.get(idf)
def get_stdouts(self):
""" Get the list of stdout of each process """
souts = []
for v in self.q.values():
souts.append(v.stdout)
return souts
def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None
def terminate(self):
""" Terminate all processes """
for w in self.q.values():
try:
w.terminate()
except:
pass
self.q = {}
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
ProcessList.get_stdouts
|
python
|
def get_stdouts(self):
souts = []
for v in self.q.values():
souts.append(v.stdout)
return souts
|
Get the list of stdout of each process
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L89-L94
| null |
class ProcessList(object):
""" Small class to store and handle calls to a given callable """
def __init__(self, f, max_size=10):
""" Create a ProcessList
f : callable for which a process will be spawned for each call to put
max_size : the maximum size of the ProcessList
"""
self.q = {}
self.max_size = max_size
self.call = f
def __del__(self):
self.terminate()
def full(self):
""" Check is the List is full, returns a bool """
return len(self.q) == self.max_size
def empty(self):
""" Check is the List is full, returns a bool """
return len(self.q) == 0
def put(self, stream, cmd):
""" Spawn a new background process """
if len(self.q) < self.max_size:
if stream['id'] in self.q:
raise QueueDuplicate
p = self.call(stream, cmd)
self.q[stream['id']] = p
else:
raise QueueFull
def get_finished(self):
""" Clean up terminated processes and returns the list of their ids """
indices = []
for idf, v in self.q.items():
if v.poll() != None:
indices.append(idf)
for i in indices:
self.q.pop(i)
return indices
def get_process(self, idf):
""" Get a process by id, returns None if there is no match """
return self.q.get(idf)
def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None
def terminate(self):
""" Terminate all processes """
for w in self.q.values():
try:
w.terminate()
except:
pass
self.q = {}
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
ProcessList.terminate_process
|
python
|
def terminate_process(self, idf):
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None
|
Terminate a process by id
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L96-L103
| null |
class ProcessList(object):
""" Small class to store and handle calls to a given callable """
def __init__(self, f, max_size=10):
""" Create a ProcessList
f : callable for which a process will be spawned for each call to put
max_size : the maximum size of the ProcessList
"""
self.q = {}
self.max_size = max_size
self.call = f
def __del__(self):
self.terminate()
def full(self):
""" Check is the List is full, returns a bool """
return len(self.q) == self.max_size
def empty(self):
""" Check is the List is full, returns a bool """
return len(self.q) == 0
def put(self, stream, cmd):
""" Spawn a new background process """
if len(self.q) < self.max_size:
if stream['id'] in self.q:
raise QueueDuplicate
p = self.call(stream, cmd)
self.q[stream['id']] = p
else:
raise QueueFull
def get_finished(self):
""" Clean up terminated processes and returns the list of their ids """
indices = []
for idf, v in self.q.items():
if v.poll() != None:
indices.append(idf)
for i in indices:
self.q.pop(i)
return indices
def get_process(self, idf):
""" Get a process by id, returns None if there is no match """
return self.q.get(idf)
def get_stdouts(self):
""" Get the list of stdout of each process """
souts = []
for v in self.q.values():
souts.append(v.stdout)
return souts
def terminate(self):
""" Terminate all processes """
for w in self.q.values():
try:
w.terminate()
except:
pass
self.q = {}
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
ProcessList.terminate
|
python
|
def terminate(self):
for w in self.q.values():
try:
w.terminate()
except:
pass
self.q = {}
|
Terminate all processes
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L105-L113
| null |
class ProcessList(object):
""" Small class to store and handle calls to a given callable """
def __init__(self, f, max_size=10):
""" Create a ProcessList
f : callable for which a process will be spawned for each call to put
max_size : the maximum size of the ProcessList
"""
self.q = {}
self.max_size = max_size
self.call = f
def __del__(self):
self.terminate()
def full(self):
""" Check is the List is full, returns a bool """
return len(self.q) == self.max_size
def empty(self):
""" Check is the List is full, returns a bool """
return len(self.q) == 0
def put(self, stream, cmd):
""" Spawn a new background process """
if len(self.q) < self.max_size:
if stream['id'] in self.q:
raise QueueDuplicate
p = self.call(stream, cmd)
self.q[stream['id']] = p
else:
raise QueueFull
def get_finished(self):
""" Clean up terminated processes and returns the list of their ids """
indices = []
for idf, v in self.q.items():
if v.poll() != None:
indices.append(idf)
for i in indices:
self.q.pop(i)
return indices
def get_process(self, idf):
""" Get a process by id, returns None if there is no match """
return self.q.get(idf)
def get_stdouts(self):
""" Get the list of stdout of each process """
souts = []
for v in self.q.values():
souts.append(v.stdout)
return souts
def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.init
|
python
|
def init(self, s):
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
|
Initialize the text interface
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L217-L244
|
[
"def set_screen_size(self):\n \"\"\" Setup screen size and padding\n\n We have need 2 free lines at the top and 2 free lines at the bottom\n\n \"\"\"\n height, width = self.getheightwidth()\n curses.resizeterm(height, width)\n self.pad_x = 0\n self.max_y, self.max_x = (height-1, width-1)\n self.pad_h = height-3\n self.pad_w = width-2*self.pad_x\n",
"def set_title(self, msg):\n \"\"\" Set first header line text \"\"\"\n self.s.move(0, 0)\n self.overwrite_line(msg, curses.A_REVERSE)\n",
"def init_help(self):\n help_pad_length = 27 # there should be a neater way to do this\n h = curses.newpad(help_pad_length, self.pad_w)\n h.keypad(1)\n\n h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)\n h.addstr( 2, 0, ' Enter : start stream')\n h.addstr( 3, 0, ' s : stop stream')\n h.addstr( 4, 0, ' r : change stream resolution')\n h.addstr( 5, 0, ' n : change stream name')\n h.addstr( 6, 0, ' u : change stream URL')\n h.addstr( 7, 0, ' c : reset stream view count')\n h.addstr( 8, 0, ' a : add stream')\n h.addstr( 9, 0, ' d : delete stream')\n\n h.addstr(11, 0, ' l : show command line')\n h.addstr(12, 0, ' L : cycle command line')\n\n h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)\n h.addstr(17, 0, ' j/up : up one line')\n h.addstr(18, 0, ' k/down: down one line')\n h.addstr(19, 0, ' f : filter streams')\n h.addstr(20, 0, ' F : clear filter')\n h.addstr(21, 0, ' o : toggle offline streams')\n h.addstr(22, 0, ' O : check for online streams')\n h.addstr(23, 0, ' gg : go to top')\n h.addstr(24, 0, ' G : go to bottom')\n h.addstr(25, 0, ' h/? : show this help')\n h.addstr(26, 0, ' q : quit')\n\n self.pads['help'] = h\n self.offsets['help'] = 0\n",
"def init_streams_pad(self, start_row=0):\n \"\"\" Create a curses pad and populate it with a line by stream \"\"\"\n y = 0\n pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)\n pad.keypad(1)\n for s in self.filtered_streams:\n pad.addstr(y, 0, self.format_stream_line(s))\n y+=1\n self.offsets['streams'] = 0\n pad.move(start_row, 0)\n if not self.no_stream_shown:\n pad.chgat(curses.A_REVERSE)\n self.pads['streams'] = pad\n",
"def set_status(self, status):\n self.status = status\n self.redraw_status()\n",
"def check_online_streams(self):\n self.all_streams_offline = True\n self.set_status(' Checking online streams...')\n\n done_queue = queue.Queue()\n\n def check_stream_managed(args):\n url, queue = args\n status = self._check_stream(url)\n done_queue.put(url)\n return status\n\n pool = Pool(self.config.CHECK_ONLINE_THREADS)\n args = [(s['url'], done_queue) for s in self.streams]\n statuses = pool.map_async(check_stream_managed, args)\n n_streams = len(self.streams)\n\n while not statuses.ready():\n sleep(0.1)\n self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))\n self.s.refresh()\n\n statuses = statuses.get()\n for i, s in enumerate(self.streams):\n s['online'] = statuses[i]\n if s['online']:\n self.all_streams_offline = False\n\n self.refilter_streams()\n self.last_autocheck = int(time())\n\n pool.close()\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
""" Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
"""
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.getheightwidth
|
python
|
def getheightwidth(self):
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
|
getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L246-L258
| null |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
""" Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
"""
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.resize
|
python
|
def resize(self, signum, obj):
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
|
handler for SIGWINCH
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L260-L273
|
[
"def set_screen_size(self):\n \"\"\" Setup screen size and padding\n\n We have need 2 free lines at the top and 2 free lines at the bottom\n\n \"\"\"\n height, width = self.getheightwidth()\n curses.resizeterm(height, width)\n self.pad_x = 0\n self.max_y, self.max_x = (height-1, width-1)\n self.pad_h = height-3\n self.pad_w = width-2*self.pad_x\n",
"def set_title(self, msg):\n \"\"\" Set first header line text \"\"\"\n self.s.move(0, 0)\n self.overwrite_line(msg, curses.A_REVERSE)\n",
"def init_help(self):\n help_pad_length = 27 # there should be a neater way to do this\n h = curses.newpad(help_pad_length, self.pad_w)\n h.keypad(1)\n\n h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)\n h.addstr( 2, 0, ' Enter : start stream')\n h.addstr( 3, 0, ' s : stop stream')\n h.addstr( 4, 0, ' r : change stream resolution')\n h.addstr( 5, 0, ' n : change stream name')\n h.addstr( 6, 0, ' u : change stream URL')\n h.addstr( 7, 0, ' c : reset stream view count')\n h.addstr( 8, 0, ' a : add stream')\n h.addstr( 9, 0, ' d : delete stream')\n\n h.addstr(11, 0, ' l : show command line')\n h.addstr(12, 0, ' L : cycle command line')\n\n h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)\n h.addstr(17, 0, ' j/up : up one line')\n h.addstr(18, 0, ' k/down: down one line')\n h.addstr(19, 0, ' f : filter streams')\n h.addstr(20, 0, ' F : clear filter')\n h.addstr(21, 0, ' o : toggle offline streams')\n h.addstr(22, 0, ' O : check for online streams')\n h.addstr(23, 0, ' gg : go to top')\n h.addstr(24, 0, ' G : go to bottom')\n h.addstr(25, 0, ' h/? : show this help')\n h.addstr(26, 0, ' q : quit')\n\n self.pads['help'] = h\n self.offsets['help'] = 0\n",
"def show(self):\n funcs = {\n 'streams' : self.show_streams,\n 'help' : self.show_help\n }\n funcs[self.current_pad]()\n",
"def init_streams_pad(self, start_row=0):\n \"\"\" Create a curses pad and populate it with a line by stream \"\"\"\n y = 0\n pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)\n pad.keypad(1)\n for s in self.filtered_streams:\n pad.addstr(y, 0, self.format_stream_line(s))\n y+=1\n self.offsets['streams'] = 0\n pad.move(start_row, 0)\n if not self.no_stream_shown:\n pad.chgat(curses.A_REVERSE)\n self.pads['streams'] = pad\n",
"def move(self, direction, absolute=False, pad_name=None, refresh=True):\n \"\"\" Scroll the current pad\n\n direction : (int) move by one in the given direction\n -1 is up, 1 is down. If absolute is True,\n go to position direction.\n Behaviour is affected by cursor_line and scroll_only below\n absolute : (bool)\n \"\"\"\n\n # pad in this lists have the current line highlighted\n cursor_line = [ 'streams' ]\n\n # pads in this list will be moved screen-wise as opposed to line-wise\n # if absolute is set, will go all the way top or all the way down depending\n # on direction\n scroll_only = [ 'help' ]\n\n if not pad_name:\n pad_name = self.current_pad\n pad = self.pads[pad_name]\n if pad_name == 'streams' and self.no_streams:\n return\n (row, col) = pad.getyx()\n new_row = row\n offset = self.offsets[pad_name]\n new_offset = offset\n if pad_name in scroll_only:\n if absolute:\n if direction > 0:\n new_offset = pad.getmaxyx()[0] - self.pad_h + 1\n else:\n new_offset = 0\n else:\n if direction > 0:\n new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)\n elif offset > 0:\n new_offset = max(0, offset - self.pad_h)\n else:\n if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:\n if direction < offset:\n new_offset = direction\n elif direction > offset + self.pad_h - 2:\n new_offset = direction - self.pad_h + 2\n new_row = direction\n else:\n if direction == -1 and row > 0:\n if row == offset:\n new_offset -= 1\n new_row = row-1\n elif direction == 1 and row < len(self.filtered_streams)-1:\n if row == offset + self.pad_h - 2:\n new_offset += 1\n new_row = row+1\n if pad_name in cursor_line:\n pad.move(row, 0)\n pad.chgat(curses.A_NORMAL)\n self.offsets[pad_name] = new_offset\n pad.move(new_row, 0)\n if pad_name in cursor_line:\n pad.chgat(curses.A_REVERSE)\n if pad_name == 'streams':\n self.redraw_stream_footer()\n if refresh:\n self.refresh_current_pad()\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
    """ Scroll the current pad

    direction : (int) move by one in the given direction
                -1 is up, 1 is down. If absolute is True,
                go to position direction.
                Behaviour is affected by cursor_line and scroll_only below
    absolute  : (bool) treat direction as a target row instead of a step
    pad_name  : pad to act on; defaults to the currently active pad
    refresh   : repaint the pad after moving
    """
    # pads in this list have the current line highlighted
    cursor_line = [ 'streams' ]
    # pads in this list will be moved screen-wise as opposed to line-wise
    # if absolute is set, will go all the way top or all the way down depending
    # on direction
    scroll_only = [ 'help' ]
    if not pad_name:
        pad_name = self.current_pad
    pad = self.pads[pad_name]
    if pad_name == 'streams' and self.no_streams:
        return
    (row, col) = pad.getyx()
    new_row = row
    offset = self.offsets[pad_name]
    new_offset = offset
    if pad_name in scroll_only:
        if absolute:
            if direction > 0:
                new_offset = pad.getmaxyx()[0] - self.pad_h + 1
            else:
                new_offset = 0
        else:
            if direction > 0:
                new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
            elif offset > 0:
                new_offset = max(0, offset - self.pad_h)
    else:
        if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
            # jump to the requested row, scrolling just enough to keep
            # it inside the visible window
            if direction < offset:
                new_offset = direction
            elif direction > offset + self.pad_h - 2:
                new_offset = direction - self.pad_h + 2
            new_row = direction
        else:
            # single step; only scroll when the cursor hits the window edge
            if direction == -1 and row > 0:
                if row == offset:
                    new_offset -= 1
                new_row = row-1
            elif direction == 1 and row < len(self.filtered_streams)-1:
                if row == offset + self.pad_h - 2:
                    new_offset += 1
                new_row = row+1
    if pad_name in cursor_line:
        # un-highlight the old cursor row before moving
        pad.move(row, 0)
        pad.chgat(curses.A_NORMAL)
    self.offsets[pad_name] = new_offset
    pad.move(new_row, 0)
    if pad_name in cursor_line:
        pad.chgat(curses.A_REVERSE)
        if pad_name == 'streams':
            self.redraw_stream_footer()
    if refresh:
        self.refresh_current_pad()
def format_stream_line(self, stream):
    """Render one stream as a fixed-width table row: id, name, res, views, status."""
    idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
    name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
    res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
    views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
    # a live player process overrides the online/offline indicator
    p = self.q.get_process(stream['id']) != None
    if p:
        indicator = self.config.INDICATORS[4] # playing
    else:
        indicator = self.config.INDICATORS[stream['online']]
    return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
    """Redraw the highlighted (cursor) line of the streams pad."""
    if self.no_streams:
        return
    cursor = self.pads[self.current_pad].getyx()[0]
    line = self.format_stream_line(self.filtered_streams[cursor])
    pad = self.pads['streams']
    pad.move(cursor, 0)
    pad.clrtoeol()
    pad.addstr(cursor, 0, line, curses.A_REVERSE)
    pad.chgat(curses.A_REVERSE)
    pad.move(cursor, 0)
    self.refresh_current_pad()
def set_status(self, status):
    """Remember *status* and repaint it on the bottom screen line."""
    self.status = status
    self.redraw_status()
def redraw_status(self):
    """Repaint the stored status text on the last screen line."""
    self.s.move(self.max_y, 0)
    # truncate so a long status never wraps past the screen edge
    self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
    self.s.refresh()
def redraw_stream_footer(self):
    """Show 'position/total url resolution' for the highlighted stream."""
    if not self.no_stream_shown:
        row = self.pads[self.current_pad].getyx()[0]
        s = self.filtered_streams[row]
        self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
        self.s.refresh()
def check_stopped_streams(self):
    """Update indicators for streams whose player process has exited.

    For every finished process id reported by the process queue, rewrite
    the status-indicator cell of the matching visible stream line.
    """
    finished = self.q.get_finished()
    for f in finished:
        for s in self.streams:
            try:
                i = self.filtered_streams.index(s)
            except ValueError:
                # stream is filtered out / hidden; nothing to redraw
                continue
            if f == s['id']:
                self.set_footer('Stream {0} has stopped'.format(s['name']))
                # keep the highlight if this is the cursor line
                if i == self.pads[self.current_pad].getyx()[0]:
                    attr = curses.A_REVERSE
                else:
                    attr = curses.A_NORMAL
                self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
                    self.config.INDICATORS[s['online']], attr)
                self.refresh_current_pad()
def _check_stream(self, url):
    """Probe *url* with livestreamer and report its availability.

    Returns one of the status codes used by the INDICATORS table:
      1 -> at least one stream is available (online)
      0 -> the plugin resolved but offered no streams (offline)
      3 -> resolving/probing raised (error/unknown)
    """
    try:
        plugin = self.livestreamer.resolve_url(url)
        avail_streams = plugin.get_streams()
        if avail_streams:
            return 1
        return 0
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the "any failure -> 3"
        # behaviour without masking interpreter-level exits.
        return 3
def check_online_streams(self):
    """Re-check every stream's availability in a worker pool.

    Updates each stream's 'online' field, rebuilds the filtered view
    and records the time of this check for the auto-check timer.
    Progress is reported on the status line while workers run.
    """
    self.all_streams_offline = True
    self.set_status(' Checking online streams...')

    done_queue = queue.Queue()

    def check_stream_managed(url):
        # Runs in a worker; report completion through done_queue.
        # (Previously this took an (url, queue) tuple, shadowing the
        # imported `queue` module with a queue object it never used.)
        status = self._check_stream(url)
        done_queue.put(url)
        return status

    pool = Pool(self.config.CHECK_ONLINE_THREADS)
    urls = [s['url'] for s in self.streams]
    statuses = pool.map_async(check_stream_managed, urls)
    n_streams = len(self.streams)

    while not statuses.ready():
        sleep(0.1)
        self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
        self.s.refresh()

    statuses = statuses.get()
    for i, s in enumerate(self.streams):
        s['online'] = statuses[i]
        if s['online']:
            self.all_streams_offline = False

    self.refilter_streams()
    self.last_autocheck = int(time())

    pool.close()
def prompt_input(self, prompt=''):
    """Ask for a line of input on the bottom row and return it as str."""
    bottom = self.max_y
    self.s.move(bottom, 0)
    self.s.clrtoeol()
    self.s.addstr(prompt)
    curses.curs_set(1)
    curses.echo()
    answer = self.s.getstr().decode()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(bottom, 0)
    self.s.clrtoeol()
    return answer
def prompt_confirmation(self, prompt='', def_yes=False):
    """Ask a yes/no question on the footer row.

    Returns True for 'y', False for 'n'; any other key picks *def_yes*.
    """
    line = self.max_y - 1
    self.s.move(line, 0)
    self.s.clrtoeol()
    hint = '[y]/n' if def_yes else 'y/[n]'
    self.s.addstr('{0} {1} '.format(prompt, hint))
    curses.curs_set(1)
    curses.echo()
    key = self.s.getch()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(line, 0)
    self.s.clrtoeol()
    if key == ord('y'):
        return True
    if key == ord('n'):
        return False
    return def_yes
def sync_store(self):
    """Write the in-memory stream list back to the shelve database."""
    self.store['streams'] = self.streams
    self.store.sync()
def bump_stream(self, stream, throttle=False):
    """Increase a stream's view count and stamp its last-started time.

    throttle : when True, skip the bump if the stream was already
               started less than a minute ago (avoids double counting
               quick restarts).
    """
    now = int(time())
    if throttle and now - stream['last_seen'] < 60:
        return
    stream['seen'] += 1
    stream['last_seen'] = now
    self.sync_store()
def find_stream(self, sel, key='id'):
    """Return the first stream whose *key* field equals *sel*, else None."""
    matches = (s for s in self.streams if s[key] == sel)
    return next(matches, None)
def clear_filter(self):
    """Drop the current name/URL filter and rebuild the visible list."""
    self.filter = ''
    self.refilter_streams()
def filter_streams(self):
    """Prompt for a filter string (matched case-insensitively) and apply it."""
    self.filter = self.prompt_input('Filter: ').lower()
    self.refilter_streams()
def refilter_streams(self, quiet=False):
    """Rebuild filtered_streams from the filter and the offline toggle.

    A stream is shown when it is online (status 1, or 2 = not yet
    checked) or offline streams are enabled, AND the filter substring
    occurs in its name or URL. The result is sorted by view count and
    the pad/footer/status are redrawn; quiet=True skips the status text.
    """
    self.filtered_streams = []
    for s in self.streams:
        if ((self.show_offline_streams or s['online'] in [1,2])
                and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
            self.filtered_streams.append(s)
    self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
    self.no_stream_shown = len(self.filtered_streams) == 0
    if not quiet:
        self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
            self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
            '' if self.show_offline_streams else 'NOT')
    self.init_streams_pad()
    self.redraw_stream_footer()
    self.show_streams()
    self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
    """Add a new stream, or bump the existing stream with the same URL.

    name : display name for the stream
    url  : stream URL; used as the uniqueness key
    res  : resolution override; defaults to the configured default,
           which may be a string, a {url-substring: resolution} dict,
           or a callable taking the url
    bump : when True, start the new stream with one view (or, if the
           URL already exists, just bump that stream's counter)
    """
    ex_stream = self.find_stream(url, key='url')
    if ex_stream:
        if bump:
            self.bump_stream(ex_stream)
    else:
        if bump:
            seen = 1
            last_seen = int(time())
        else:
            seen = last_seen = 0
        # BUG FIX: previously the first stream was given id 1 while
        # max_id was left at 0, so the second stream also got id 1.
        # Always advancing max_id keeps ids unique (empty list: 0 -> 1,
        # so the first id is still 1).
        self.max_id += 1
        idf = self.max_id
        s_res = res or self.default_res
        if isinstance(s_res, str):
            actual_res = s_res
        elif isinstance(s_res, dict):
            # pick the resolution whose key is a substring of the url
            actual_res = DEFAULT_RESOLUTION_HARD
            for k, v in s_res.items():
                if k in url:
                    actual_res = v
                    break
        elif callable(s_res):
            actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
        else:
            actual_res = DEFAULT_RESOLUTION_HARD
        self.set_status(' Checking if new stream is online...')
        self.s.refresh()
        online = self._check_stream(url)
        new_stream = {
            'id' : idf,
            'name' : name,
            'seen' : seen,
            'last_seen' : last_seen,
            'res' : actual_res,
            'url' : url,
            'online' : online
        }
        self.streams.append(new_stream)
        self.no_streams = False
        self.refilter_streams()
        self.sync_store()
def delete_stream(self):
    """Delete the highlighted stream after asking for confirmation.

    Removes it from both lists, persists the change, fixes up the
    no-streams flags and moves the cursor back if it fell off the end.
    """
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
        return
    self.filtered_streams.remove(s)
    self.streams.remove(s)
    pad.deleteln()
    self.sync_store()
    if not self.streams:
        self.no_streams = True
    if not self.filtered_streams:
        self.no_stream_shown = True
    # cursor fell past the last row: step back and re-highlight
    if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
        self.move(-1, refresh=False)
        pad.chgat(curses.A_REVERSE)
    self.redraw_current_line()
    self.show_streams()
def reset_stream(self):
    """Zero the view count and last-seen time of the highlighted stream."""
    if self.no_stream_shown:
        return
    cursor = self.pads[self.current_pad].getyx()[0]
    stream = self.filtered_streams[cursor]
    if self.prompt_confirmation('Reset stream {0}?'.format(stream['name'])):
        stream['seen'] = 0
        stream['last_seen'] = 0
        self.redraw_current_line()
        self.sync_store()
def edit_stream(self, attr):
    """Prompt for a new value of *attr* ('name', 'url' or 'res') on the
    highlighted stream; an empty answer cancels the edit."""
    labels = {
        'name' : 'Name',
        'url' : 'URL',
        'res' : 'Resolution'
    }
    if self.no_streams:
        return
    cursor = self.pads[self.current_pad].getyx()[0]
    stream = self.filtered_streams[cursor]
    answer = self.prompt_input('{0} (empty to cancel): '.format(labels[attr]))
    if answer:
        stream[attr] = answer
        self.redraw_current_line()
        self.redraw_status()
        self.redraw_stream_footer()
def show_commandline(self):
    """Display the active player command line in the footer (index/total)."""
    self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
    """Cycle to the next configured player command line, wrapping around."""
    self.cmd_index = (self.cmd_index + 1) % len(self.cmd_list)
    self.cmd = self.cmd_list[self.cmd_index]
    self.show_commandline()
def prompt_new_stream(self):
    """Ask for a URL and add it as a new stream.

    The last path component becomes the stream name; an empty URL (or a
    URL ending in '/') yields an empty name and silently cancels.
    """
    url = self.prompt_input('New stream URL (empty to cancel): ')
    name = url.split('/')[-1]
    if name:
        self.add_stream(name, url)
        # jump to the end of the list, where the new stream appears
        self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
        self.show_streams()
def play_stream(self):
    """Launch the highlighted stream with the active command line.

    On success bumps the view count (throttled) and redraws the line;
    duplicate launches and bad command lines are reported on the footer.
    """
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    # Previously this caught Exception and compared `type(e) ==`, then
    # re-raised anything else; dedicated handlers are equivalent but
    # also match subclasses and keep the happy path clean.
    try:
        self.q.put(s, self.cmd)
        self.bump_stream(s, throttle=True)
        self.redraw_current_line()
        self.refresh_current_pad()
    except QueueDuplicate:
        self.set_footer('This stream is already playing')
    except OSError as e:
        self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
def stop_stream(self):
    """Terminate the player process of the highlighted stream, if any."""
    if self.no_stream_shown:
        return
    cursor = self.pads[self.current_pad].getyx()[0]
    stream = self.filtered_streams[cursor]
    if self.q.terminate_process(stream['id']):
        self.redraw_current_line()
        self.redraw_stream_footer()
        self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.run
|
python
|
def run(self):
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
|
Main event loop
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L275-L368
|
[
"def show_help(self):\n \"\"\" Redraw Help screen and wait for any input to leave \"\"\"\n self.s.move(1,0)\n self.s.clrtobot()\n self.set_header('Help'.center(self.pad_w))\n self.set_footer(' ESC or \\'q\\' to return to main menu')\n self.s.refresh()\n self.current_pad = 'help'\n self.refresh_current_pad()\n",
"def show_streams(self):\n self.s.move(1,0)\n self.s.clrtobot()\n self.current_pad = 'streams'\n if self.no_stream_shown:\n self.hide_streams_pad()\n if self.no_streams:\n self.s.addstr(5, 5, 'It seems you don\\'t have any stream yet')\n self.s.addstr(6, 5, 'Hit \\'a\\' to add a new one')\n self.s.addstr(8, 5, 'Hit \\'?\\' for help')\n elif self.all_streams_offline and not self.show_offline_streams:\n self.s.addstr(5, 5, 'All streams are currently offline')\n self.s.addstr(6, 5, 'Hit \\'o\\' to show offline streams')\n self.s.addstr(7, 5, 'Hit \\'O\\' to refresh')\n self.s.addstr(9, 5, 'Hit \\'?\\' for help')\n else:\n self.s.addstr(5, 5, 'No stream matches your filter')\n self.s.addstr(6, 5, 'Hit \\'f\\' to change filter')\n self.s.addstr(7, 5, 'Hit \\'F\\' to clear')\n self.s.addstr(8, 5, 'Hit \\'o\\' to show offline streams')\n self.s.addstr(10, 5, 'Hit \\'?\\' for help')\n else:\n idf = 'ID'.center(ID_FIELD_WIDTH)\n name = 'Name'.center(NAME_FIELD_WIDTH)\n res = 'Resolution'.center(RES_FIELD_WIDTH)\n views = 'Views'.center(VIEWS_FIELD_WIDTH)\n self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))\n self.redraw_stream_footer()\n self.redraw_status()\n self.s.refresh()\n if not self.no_stream_shown:\n self.refresh_current_pad()\n",
"def move(self, direction, absolute=False, pad_name=None, refresh=True):\n \"\"\" Scroll the current pad\n\n direction : (int) move by one in the given direction\n -1 is up, 1 is down. If absolute is True,\n go to position direction.\n Behaviour is affected by cursor_line and scroll_only below\n absolute : (bool)\n \"\"\"\n\n # pad in this lists have the current line highlighted\n cursor_line = [ 'streams' ]\n\n # pads in this list will be moved screen-wise as opposed to line-wise\n # if absolute is set, will go all the way top or all the way down depending\n # on direction\n scroll_only = [ 'help' ]\n\n if not pad_name:\n pad_name = self.current_pad\n pad = self.pads[pad_name]\n if pad_name == 'streams' and self.no_streams:\n return\n (row, col) = pad.getyx()\n new_row = row\n offset = self.offsets[pad_name]\n new_offset = offset\n if pad_name in scroll_only:\n if absolute:\n if direction > 0:\n new_offset = pad.getmaxyx()[0] - self.pad_h + 1\n else:\n new_offset = 0\n else:\n if direction > 0:\n new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)\n elif offset > 0:\n new_offset = max(0, offset - self.pad_h)\n else:\n if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:\n if direction < offset:\n new_offset = direction\n elif direction > offset + self.pad_h - 2:\n new_offset = direction - self.pad_h + 2\n new_row = direction\n else:\n if direction == -1 and row > 0:\n if row == offset:\n new_offset -= 1\n new_row = row-1\n elif direction == 1 and row < len(self.filtered_streams)-1:\n if row == offset + self.pad_h - 2:\n new_offset += 1\n new_row = row+1\n if pad_name in cursor_line:\n pad.move(row, 0)\n pad.chgat(curses.A_NORMAL)\n self.offsets[pad_name] = new_offset\n pad.move(new_row, 0)\n if pad_name in cursor_line:\n pad.chgat(curses.A_REVERSE)\n if pad_name == 'streams':\n self.redraw_stream_footer()\n if refresh:\n self.refresh_current_pad()\n",
"def set_status(self, status):\n self.status = status\n self.redraw_status()\n",
"def check_stopped_streams(self):\n finished = self.q.get_finished()\n for f in finished:\n for s in self.streams:\n try:\n i = self.filtered_streams.index(s)\n except ValueError:\n continue\n if f == s['id']:\n self.set_footer('Stream {0} has stopped'.format(s['name']))\n if i == self.pads[self.current_pad].getyx()[0]:\n attr = curses.A_REVERSE\n else:\n attr = curses.A_NORMAL\n self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,\n self.config.INDICATORS[s['online']], attr)\n self.refresh_current_pad()\n",
"def check_online_streams(self):\n self.all_streams_offline = True\n self.set_status(' Checking online streams...')\n\n done_queue = queue.Queue()\n\n def check_stream_managed(args):\n url, queue = args\n status = self._check_stream(url)\n done_queue.put(url)\n return status\n\n pool = Pool(self.config.CHECK_ONLINE_THREADS)\n args = [(s['url'], done_queue) for s in self.streams]\n statuses = pool.map_async(check_stream_managed, args)\n n_streams = len(self.streams)\n\n while not statuses.ready():\n sleep(0.1)\n self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))\n self.s.refresh()\n\n statuses = statuses.get()\n for i, s in enumerate(self.streams):\n s['online'] = statuses[i]\n if s['online']:\n self.all_streams_offline = False\n\n self.refilter_streams()\n self.last_autocheck = int(time())\n\n pool.close()\n",
"def clear_filter(self):\n self.filter = ''\n self.refilter_streams()\n",
"def filter_streams(self):\n self.filter = self.prompt_input('Filter: ').lower()\n self.refilter_streams()\n",
"def refilter_streams(self, quiet=False):\n self.filtered_streams = []\n for s in self.streams:\n if ((self.show_offline_streams or s['online'] in [1,2])\n and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):\n self.filtered_streams.append(s)\n self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)\n self.no_stream_shown = len(self.filtered_streams) == 0\n if not quiet:\n self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(\n self.filter or '<empty>', len(self.filtered_streams), len(self.streams),\n '' if self.show_offline_streams else 'NOT')\n self.init_streams_pad()\n self.redraw_stream_footer()\n self.show_streams()\n self.redraw_status()\n",
"def delete_stream(self):\n if self.no_streams:\n return\n pad = self.pads[self.current_pad]\n s = self.filtered_streams[pad.getyx()[0]]\n if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):\n return\n self.filtered_streams.remove(s)\n self.streams.remove(s)\n pad.deleteln()\n self.sync_store()\n if not self.streams:\n self.no_streams = True\n if not self.filtered_streams:\n self.no_stream_shown = True\n if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:\n self.move(-1, refresh=False)\n pad.chgat(curses.A_REVERSE)\n self.redraw_current_line()\n self.show_streams()\n",
"def reset_stream(self):\n if self.no_stream_shown:\n return\n pad = self.pads[self.current_pad]\n s = self.filtered_streams[pad.getyx()[0]]\n if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):\n return\n s['seen'] = 0\n s['last_seen'] = 0\n self.redraw_current_line()\n self.sync_store()\n",
"def edit_stream(self, attr):\n prompt_info = {\n 'name' : 'Name',\n 'url' : 'URL',\n 'res' : 'Resolution'\n }\n if self.no_streams:\n return\n pad = self.pads[self.current_pad]\n s = self.filtered_streams[pad.getyx()[0]]\n new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))\n if new_val != '':\n s[attr] = new_val\n self.redraw_current_line()\n self.redraw_status()\n self.redraw_stream_footer()\n",
"def show_commandline(self):\n self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))\n",
"def shift_commandline(self):\n self.cmd_index += 1\n if self.cmd_index == len(self.cmd_list):\n self.cmd_index = 0\n self.cmd = self.cmd_list[self.cmd_index]\n self.show_commandline()\n",
"def prompt_new_stream(self):\n url = self.prompt_input('New stream URL (empty to cancel): ')\n name = url.split('/')[-1]\n if name:\n self.add_stream(name, url)\n self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)\n self.show_streams()\n",
"def play_stream(self):\n if self.no_stream_shown:\n return\n pad = self.pads[self.current_pad]\n s = self.filtered_streams[pad.getyx()[0]]\n try:\n self.q.put(s, self.cmd)\n self.bump_stream(s, throttle=True)\n self.redraw_current_line()\n self.refresh_current_pad()\n except Exception as e:\n if type(e) == QueueDuplicate:\n self.set_footer('This stream is already playing')\n elif type(e) == OSError:\n self.set_footer('/!\\ Faulty command line: {0}'.format(e.strerror))\n else:\n raise e\n",
"def stop_stream(self):\n if self.no_stream_shown:\n return\n pad = self.pads[self.current_pad]\n s = self.filtered_streams[pad.getyx()[0]]\n p = self.q.terminate_process(s['id'])\n if p:\n self.redraw_current_line()\n self.redraw_stream_footer()\n self.redraw_status()\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
    """ Init and try to load a stream list, nothing about curses yet

    filename         : path of the shelve database
    config           : configuration object (commands, resolutions, ...)
    list_streams     : dump the stored streams as JSON and exit
    init_stream_list : optional list of stream dicts seeding the database
    """
    global TITLE_STRING
    self.db_was_read = False
    # Open the storage (create it if necessary)
    try:
        db_dir = os.path.dirname(filename)
        if not os.path.exists(db_dir):
            os.makedirs(db_dir)
        f = shelve.open(filename, 'c')
    except Exception:
        raise ShelveError(
            'Database could not be opened, another livestreamer-curses instance might be already running. '
            'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
        )
    self.max_id = 0
    if init_stream_list:
        # BUG FIX: normalize the entries BEFORE storing them. Mutating
        # `f['streams']` works on a throwaway unpickled copy (shelve
        # without writeback), so the ids/counters were silently lost.
        for i, s in enumerate(init_stream_list):
            s['id'] = s.get('id') or i
            s['seen'] = s.get('seen') or 0
            s['last_seen'] = s.get('last_seen') or 0
            self.max_id = i
        f['streams'] = init_stream_list
        f.sync()
    # Sort streams by view count
    try:
        self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
        for s in self.streams:
            # Max id, needed when adding a new stream
            self.max_id = max(self.max_id, s['id'])
            s['online'] = 2
        if list_streams:
            print(json.dumps(self.streams))
            f.close()
            sys.exit(0)
    except SystemExit:
        # BUG FIX: the bare `except:` below used to swallow sys.exit(0),
        # so --list-streams mode printed the list and then kept running.
        raise
    except Exception:
        # missing/garbled 'streams' entry: start from an empty list
        self.streams = []
    self.db_was_read = True
    self.filtered_streams = list(self.streams)
    self.filter = ''
    self.all_streams_offline = None
    self.show_offline_streams = False
    self.config = config
    TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
    self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
    self.cmd_index = 0
    self.cmd = self.cmd_list[self.cmd_index]
    self.last_autocheck = 0
    self.default_res = self.config.DEFAULT_RESOLUTION
    self.store = f
    self.store.sync()
    self.no_streams = self.streams == []
    self.no_stream_shown = self.no_streams
    self.q = ProcessList(StreamPlayer().play)
    self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
    """ Stop playing streams and sync storage """
    try:
        self.q.terminate()
        if self.db_was_read:
            self.store['cmd'] = self.cmd
            self.store['streams'] = self.streams
            self.store.close()
    except:
        # deliberate best-effort: __del__ may run during interpreter
        # shutdown when attributes/modules are already gone
        pass
def __call__(self, s):
    """Entry point taking the initialized curses screen *s*
    (presumably invoked through curses.wrapper — confirm at call site)."""
    # Terminal initialization
    self.init(s)
    # Main event loop
    self.run()
def init(self, s):
    """ Initialize the text interface

    Sets up the screen, pads, title, the resize handler and optionally
    runs a first online check.
    """
    # Hide cursor
    curses.curs_set(0)
    self.s = s
    self.s.keypad(1)
    self.set_screen_size()
    self.pads = {}
    self.offsets = {}
    self.init_help()
    self.init_streams_pad()
    self.current_pad = 'streams'
    self.set_title(TITLE_STRING)
    self.got_g = False
    # 28 was the raw Linux value of SIGWINCH; use the symbolic constant
    # so the resize handler is installed on platforms where it differs.
    signal.signal(signal.SIGWINCH, self.resize)
    if self.config.CHECK_ONLINE_ON_START:
        self.check_online_streams()
    self.set_status('Ready')
def getheightwidth(self):
    """ getheightwidth() -> (int, int)
    Return the height and width of the console in characters, from the
    LINES/COLUMNS environment if set, else via the TIOCGWINSZ ioctl.
    https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
    try:
        return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
    except KeyError:
        # ioctl needs a bytes buffer on Python 3; the previous str
        # argument ("\000"*8) raised TypeError there. bytes also works
        # on Python 2.
        height, width = struct.unpack(
            "hhhh", ioctl(0, termios.TIOCGWINSZ, b"\000"*8))[0:2]
        if not height:
            return 25, 80
        return height, width
def resize(self, signum, obj):
    """ handler for SIGWINCH """
    # rebuild everything at the new terminal size, keeping the cursor row
    self.s.clear()
    stream_cursor = self.pads['streams'].getyx()[0]
    for pad in self.pads.values():
        pad.clear()
    self.s.refresh()
    self.set_screen_size()
    self.set_title(TITLE_STRING)
    self.init_help()
    self.init_streams_pad()
    self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
    self.s.refresh()
    self.show()
def set_screen_size(self):
    """Recompute screen and pad geometry.

    Two lines are reserved above the pad (title + header) and two
    below (footer + status), hence the pad height of height-3.
    """
    height, width = self.getheightwidth()
    curses.resizeterm(height, width)
    self.pad_x = 0
    self.max_y, self.max_x = height - 1, width - 1
    self.pad_h = height - 3
    self.pad_w = width - 2 * self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
    """Clear the rest of the current line, write *msg* and extend *attr* to EOL."""
    self.s.clrtoeol()
    self.s.addstr(msg, attr)
    self.s.chgat(attr)
def set_title(self, msg):
    """ Set first header line text """
    self.s.move(0, 0)
    self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
    """ Set second head line text """
    self.s.move(1, 0)
    self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
    """Write *msg* on the footer line (reverse video by default)."""
    attr = curses.A_REVERSE if reverse else curses.A_NORMAL
    self.s.move(self.max_y - 1, 0)
    self.overwrite_line(msg, attr=attr)
def clear_footer(self):
    """Blank the footer line (resets it to normal attributes)."""
    self.s.move(self.max_y-1, 0)
    self.overwrite_line('')
def init_help(self):
    """Build the static help pad listing all key bindings."""
    help_pad_length = 27 # there should be a neater way to do this
    h = curses.newpad(help_pad_length, self.pad_w)
    h.keypad(1)
    h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
    h.addstr( 2, 0, ' Enter : start stream')
    h.addstr( 3, 0, ' s : stop stream')
    h.addstr( 4, 0, ' r : change stream resolution')
    h.addstr( 5, 0, ' n : change stream name')
    h.addstr( 6, 0, ' u : change stream URL')
    h.addstr( 7, 0, ' c : reset stream view count')
    h.addstr( 8, 0, ' a : add stream')
    h.addstr( 9, 0, ' d : delete stream')
    h.addstr(11, 0, ' l : show command line')
    h.addstr(12, 0, ' L : cycle command line')
    h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
    # BUG FIX: the help used to say j=up / k=down, but the event loop
    # binds KEY_UP|'k' to move(-1) and KEY_DOWN|'j' to move(1).
    h.addstr(17, 0, ' k/up : up one line')
    h.addstr(18, 0, ' j/down: down one line')
    h.addstr(19, 0, ' f : filter streams')
    h.addstr(20, 0, ' F : clear filter')
    h.addstr(21, 0, ' o : toggle offline streams')
    h.addstr(22, 0, ' O : check for online streams')
    h.addstr(23, 0, ' gg : go to top')
    h.addstr(24, 0, ' G : go to bottom')
    h.addstr(25, 0, ' h/? : show this help')
    h.addstr(26, 0, ' q : quit')
    self.pads['help'] = h
    self.offsets['help'] = 0
def show(self):
    """Redraw whichever pad is currently active via its show_* function."""
    funcs = {
        'streams' : self.show_streams,
        'help' : self.show_help
    }
    funcs[self.current_pad]()
def show_help(self):
    """ Redraw Help screen and wait for any input to leave """
    self.s.move(1,0)
    self.s.clrtobot()
    self.set_header('Help'.center(self.pad_w))
    self.set_footer(' ESC or \'q\' to return to main menu')
    self.s.refresh()
    # the run() loop routes keys based on current_pad
    self.current_pad = 'help'
    self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
    """ Create a curses pad and populate it with a line by stream """
    y = 0
    pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
    pad.keypad(1)
    for s in self.filtered_streams:
        pad.addstr(y, 0, self.format_stream_line(s))
        y+=1
    self.offsets['streams'] = 0
    pad.move(start_row, 0)
    if not self.no_stream_shown:
        # highlight the cursor line
        pad.chgat(curses.A_REVERSE)
    self.pads['streams'] = pad
def show_streams(self):
    """Redraw the main stream-list view, or a hint screen when empty."""
    self.s.move(1,0)
    self.s.clrtobot()
    self.current_pad = 'streams'
    if self.no_stream_shown:
        self.hide_streams_pad()
        if self.no_streams:
            self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
            self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
            self.s.addstr(8, 5, 'Hit \'?\' for help')
        elif self.all_streams_offline and not self.show_offline_streams:
            self.s.addstr(5, 5, 'All streams are currently offline')
            self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
            self.s.addstr(7, 5, 'Hit \'O\' to refresh')
            self.s.addstr(9, 5, 'Hit \'?\' for help')
        else:
            self.s.addstr(5, 5, 'No stream matches your filter')
            self.s.addstr(6, 5, 'Hit \'f\' to change filter')
            self.s.addstr(7, 5, 'Hit \'F\' to clear')
            self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
            self.s.addstr(10, 5, 'Hit \'?\' for help')
    else:
        # column header, widths shared with format_stream_line()
        idf = 'ID'.center(ID_FIELD_WIDTH)
        name = 'Name'.center(NAME_FIELD_WIDTH)
        res = 'Resolution'.center(RES_FIELD_WIDTH)
        views = 'Views'.center(VIEWS_FIELD_WIDTH)
        self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
        self.redraw_stream_footer()
    self.redraw_status()
    self.s.refresh()
    if not self.no_stream_shown:
        self.refresh_current_pad()
def hide_streams_pad(self):
    """Collapse the streams pad's refresh area to nothing, hiding it."""
    pad = self.pads.get('streams')
    if pad:
        pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
    """Repaint the active pad at its current scroll offset."""
    pad = self.pads[self.current_pad]
    pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
    """ Scroll the current pad

    direction : (int) move by one in the given direction
                -1 is up, 1 is down. If absolute is True,
                go to position direction.
                Behaviour is affected by cursor_line and scroll_only below
    absolute  : (bool) treat direction as a target row instead of a step
    pad_name  : pad to act on; defaults to the currently active pad
    refresh   : repaint the pad after moving
    """
    # pads in this list have the current line highlighted
    cursor_line = [ 'streams' ]
    # pads in this list will be moved screen-wise as opposed to line-wise
    # if absolute is set, will go all the way top or all the way down depending
    # on direction
    scroll_only = [ 'help' ]
    if not pad_name:
        pad_name = self.current_pad
    pad = self.pads[pad_name]
    if pad_name == 'streams' and self.no_streams:
        return
    (row, col) = pad.getyx()
    new_row = row
    offset = self.offsets[pad_name]
    new_offset = offset
    if pad_name in scroll_only:
        if absolute:
            if direction > 0:
                new_offset = pad.getmaxyx()[0] - self.pad_h + 1
            else:
                new_offset = 0
        else:
            if direction > 0:
                new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
            elif offset > 0:
                new_offset = max(0, offset - self.pad_h)
    else:
        if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
            # jump to the requested row, scrolling just enough to keep
            # it inside the visible window
            if direction < offset:
                new_offset = direction
            elif direction > offset + self.pad_h - 2:
                new_offset = direction - self.pad_h + 2
            new_row = direction
        else:
            # single step; only scroll when the cursor hits the window edge
            if direction == -1 and row > 0:
                if row == offset:
                    new_offset -= 1
                new_row = row-1
            elif direction == 1 and row < len(self.filtered_streams)-1:
                if row == offset + self.pad_h - 2:
                    new_offset += 1
                new_row = row+1
    if pad_name in cursor_line:
        # un-highlight the old cursor row before moving
        pad.move(row, 0)
        pad.chgat(curses.A_NORMAL)
    self.offsets[pad_name] = new_offset
    pad.move(new_row, 0)
    if pad_name in cursor_line:
        pad.chgat(curses.A_REVERSE)
        if pad_name == 'streams':
            self.redraw_stream_footer()
    if refresh:
        self.refresh_current_pad()
def format_stream_line(self, stream):
    """Render one stream as a fixed-width table row: id, name, res, views, status."""
    idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
    name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
    res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
    views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
    # a live player process overrides the online/offline indicator
    p = self.q.get_process(stream['id']) != None
    if p:
        indicator = self.config.INDICATORS[4] # playing
    else:
        indicator = self.config.INDICATORS[stream['online']]
    return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
    """ Redraw the highlighted line """
    if self.no_streams:
        return
    row = self.pads[self.current_pad].getyx()[0]
    s = self.filtered_streams[row]
    pad = self.pads['streams']
    pad.move(row, 0)
    pad.clrtoeol()
    pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
    pad.chgat(curses.A_REVERSE)
    pad.move(row, 0)
    self.refresh_current_pad()
def set_status(self, status):
    """Remember *status* and repaint it on the bottom screen line."""
    self.status = status
    self.redraw_status()
def redraw_status(self):
    """Repaint the stored status text on the last screen line."""
    self.s.move(self.max_y, 0)
    # truncate so a long status never wraps past the screen edge
    self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
    self.s.refresh()
def redraw_stream_footer(self):
    """Show 'position/total url resolution' for the highlighted stream."""
    if not self.no_stream_shown:
        row = self.pads[self.current_pad].getyx()[0]
        s = self.filtered_streams[row]
        self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
        self.s.refresh()
def check_stopped_streams(self):
    """Update indicators for streams whose player process has exited."""
    finished = self.q.get_finished()
    for f in finished:
        for s in self.streams:
            try:
                i = self.filtered_streams.index(s)
            except ValueError:
                # stream is filtered out / hidden; nothing to redraw
                continue
            if f == s['id']:
                self.set_footer('Stream {0} has stopped'.format(s['name']))
                # keep the highlight if this is the cursor line
                if i == self.pads[self.current_pad].getyx()[0]:
                    attr = curses.A_REVERSE
                else:
                    attr = curses.A_NORMAL
                self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
                    self.config.INDICATORS[s['online']], attr)
                self.refresh_current_pad()
def _check_stream(self, url):
    """Probe *url* with livestreamer.

    Returns 1 when playable streams exist, 0 when offline, 3 on any
    resolution error; the value indexes config.INDICATORS.
    """
    try:
        plugin = self.livestreamer.resolve_url(url)
        avail_streams = plugin.get_streams()
        if avail_streams:
            return 1
        return 0
    # was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        return 3
def check_online_streams(self):
    """Probe all streams concurrently and update their 'online' flags."""
    self.all_streams_offline = True
    self.set_status(' Checking online streams...')
    done_queue = queue.Queue()
    def check_stream_managed(args):
        # NOTE(review): the second tuple element shadows the `queue` module
        # and is unused; done_queue is captured from the closure instead.
        url, queue = args
        status = self._check_stream(url)
        done_queue.put(url)
        return status
    pool = Pool(self.config.CHECK_ONLINE_THREADS)
    args = [(s['url'], done_queue) for s in self.streams]
    statuses = pool.map_async(check_stream_managed, args)
    n_streams = len(self.streams)
    # poll so the status line can show progress while workers run
    while not statuses.ready():
        sleep(0.1)
        self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
        self.s.refresh()
    statuses = statuses.get()
    for i, s in enumerate(self.streams):
        s['online'] = statuses[i]
        if s['online']:
            self.all_streams_offline = False
    self.refilter_streams()
    self.last_autocheck = int(time())
    pool.close()
def prompt_input(self, prompt=''):
    """Read one line of user input on the status line and return it."""
    self.s.move(self.max_y, 0)
    self.s.clrtoeol()
    self.s.addstr(prompt)
    curses.curs_set(1)  # show the cursor while typing
    curses.echo()
    r = self.s.getstr().decode()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(self.max_y, 0)
    self.s.clrtoeol()
    return r
def prompt_confirmation(self, prompt='', def_yes=False):
    """Ask a yes/no question on the footer line.

    Returns True for 'y', False for 'n', and *def_yes* for any other key.
    """
    self.s.move(self.max_y-1, 0)
    self.s.clrtoeol()
    if def_yes:
        hint = '[y]/n'
    else:
        hint = 'y/[n]'
    self.s.addstr('{0} {1} '.format(prompt, hint))
    curses.curs_set(1)
    curses.echo()
    r = self.s.getch()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(self.max_y-1, 0)
    self.s.clrtoeol()
    if r == ord('y'):
        return True
    elif r == ord('n'):
        return False
    else:
        return def_yes
def sync_store(self):
    """Persist the in-memory stream list to the shelve store."""
    self.store['streams'] = self.streams
    self.store.sync()
def bump_stream(self, stream, throttle=False):
    """Increment a stream's view counter and persist it.

    With throttle=True the bump is skipped when the stream was already
    started less than a minute ago.
    """
    now = int(time())
    if throttle and now - stream['last_seen'] < 60:
        return
    stream['seen'] += 1
    stream['last_seen'] = now
    self.sync_store()
def find_stream(self, sel, key='id'):
    """Return the first stream whose *key* field equals *sel*, or None."""
    return next((s for s in self.streams if s[key] == sel), None)
def clear_filter(self):
    """Reset the filter to match everything and rebuild the visible list."""
    self.filter = ''
    self.refilter_streams()
def filter_streams(self):
    """Prompt for a case-insensitive filter string and apply it."""
    self.filter = self.prompt_input('Filter: ').lower()
    self.refilter_streams()
def refilter_streams(self, quiet=False):
    """Rebuild filtered_streams from the filter/offline settings and redraw.

    A stream is shown when it is online (or offline streams are enabled)
    and the filter substring occurs in its name or URL.
    """
    self.filtered_streams = []
    for s in self.streams:
        if ((self.show_offline_streams or s['online'] in [1,2])
                and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
            self.filtered_streams.append(s)
    # most-viewed streams first
    self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
    self.no_stream_shown = len(self.filtered_streams) == 0
    if not quiet:
        self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
            self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
            '' if self.show_offline_streams else 'NOT')
    self.init_streams_pad()
    self.redraw_stream_footer()
    self.show_streams()
    self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
    """Add a stream, or bump the existing one with the same URL.

    *res* may be a resolution string, a {url-substring: resolution} dict,
    or a callable url -> resolution; otherwise DEFAULT_RESOLUTION_HARD
    is used. The new stream is probed once to set its 'online' flag.
    """
    ex_stream = self.find_stream(url, key='url')
    if ex_stream:
        if bump:
            self.bump_stream(ex_stream)
    else:
        if bump:
            seen = 1
            last_seen = int(time())
        else:
            seen = last_seen = 0
        if not self.streams:
            # keep max_id in sync so the next stream does not reuse id 1
            # (previously only idf was set, leaving max_id at 0)
            self.max_id = idf = 1
        else:
            self.max_id += 1
            idf = self.max_id
        s_res = res or self.default_res
        # isinstance instead of type() comparison; also accepts subclasses
        if isinstance(s_res, str):
            actual_res = s_res
        elif isinstance(s_res, dict):
            actual_res = DEFAULT_RESOLUTION_HARD
            for k, v in s_res.items():
                if k in url:
                    actual_res = v
                    break
        elif callable(s_res):
            actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
        else:
            actual_res = DEFAULT_RESOLUTION_HARD
        self.set_status(' Checking if new stream is online...')
        self.s.refresh()
        online = self._check_stream(url)
        new_stream = {
            'id' : idf,
            'name' : name,
            'seen' : seen,
            'last_seen' : last_seen,
            'res' : actual_res,
            'url' : url,
            'online' : online
        }
        self.streams.append(new_stream)
        self.no_streams = False
        self.refilter_streams()
        self.sync_store()
def delete_stream(self):
    """Delete the highlighted stream after confirmation and persist."""
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
        return
    self.filtered_streams.remove(s)
    self.streams.remove(s)
    pad.deleteln()
    self.sync_store()
    if not self.streams:
        self.no_streams = True
    if not self.filtered_streams:
        self.no_stream_shown = True
    # deleting the last row: step the cursor back up
    if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
        self.move(-1, refresh=False)
        pad.chgat(curses.A_REVERSE)
    self.redraw_current_line()
    self.show_streams()
def reset_stream(self):
    """Zero the view counters of the highlighted stream after confirmation."""
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
        return
    s['seen'] = 0
    s['last_seen'] = 0
    self.redraw_current_line()
    self.sync_store()
def edit_stream(self, attr):
    """Prompt for a new value of *attr* ('name', 'url' or 'res') of the
    highlighted stream; an empty answer cancels the edit."""
    prompt_info = {
        'name' : 'Name',
        'url' : 'URL',
        'res' : 'Resolution'
    }
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
    if new_val != '':
        s[attr] = new_val
        self.redraw_current_line()
    self.redraw_status()
    self.redraw_stream_footer()
def show_commandline(self):
    """Display the active livestreamer command line in the footer."""
    position = '{0}/{1}'.format(self.cmd_index + 1, len(self.cmd_list))
    self.set_footer('{0} {1}'.format(position, ' '.join(self.cmd)))
def shift_commandline(self):
    """Cycle to the next configured command line (wrapping) and show it."""
    self.cmd_index = (self.cmd_index + 1) % len(self.cmd_list)
    self.cmd = self.cmd_list[self.cmd_index]
    self.show_commandline()
def prompt_new_stream(self):
    """Ask for a URL and add it; the stream name defaults to the last
    URL path component. An empty URL cancels."""
    url = self.prompt_input('New stream URL (empty to cancel): ')
    name = url.split('/')[-1]
    if name:
        self.add_stream(name, url)
        # jump to the freshly added stream
        self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
        self.show_streams()
def play_stream(self):
    """Launch the highlighted stream with the active command line."""
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    try:
        self.q.put(s, self.cmd)
        self.bump_stream(s, throttle=True)
        self.redraw_current_line()
        self.refresh_current_pad()
    # dedicated except clauses instead of `type(e) ==` checks + re-raise;
    # anything else still propagates as before
    except QueueDuplicate:
        self.set_footer('This stream is already playing')
    except OSError as e:
        # doubled backslash: same message bytes, no invalid '\ ' escape
        self.set_footer('/!\\ Faulty command line: {0}'.format(e.strerror))
def stop_stream(self):
    """Terminate the player of the highlighted stream, if one is running."""
    if self.no_stream_shown:
        return
    row = self.pads[self.current_pad].getyx()[0]
    stream = self.filtered_streams[row]
    if self.q.terminate_process(stream['id']):
        self.redraw_current_line()
        self.redraw_stream_footer()
        self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.set_screen_size
|
python
|
def set_screen_size(self):
    """Recompute terminal size and pad geometry after a (re)size.

    pad_h/pad_w leave two header lines at the top and two footer lines
    at the bottom of the screen.
    """
    height, width = self.getheightwidth()
    curses.resizeterm(height, width)
    self.pad_x = 0
    self.max_y, self.max_x = (height-1, width-1)
    self.pad_h = height-3
    self.pad_w = width-2*self.pad_x
|
Setup screen size and padding
We need 2 free lines at the top and 2 free lines at the bottom
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L370-L381
|
[
"def getheightwidth(self):\n \"\"\" getwidth() -> (int, int)\n\n Return the height and width of the console in characters\n https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ\"\"\"\n try:\n return int(os.environ[\"LINES\"]), int(os.environ[\"COLUMNS\"])\n except KeyError:\n height, width = struct.unpack(\n \"hhhh\", ioctl(0, termios.TIOCGWINSZ ,\"\\000\"*8))[0:2]\n if not height:\n return 25, 80\n return height, width\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
    """ Init and try to load a stream list, nothing about curses yet """
    global TITLE_STRING
    self.db_was_read = False
    # Open the storage (create it if necessary)
    try:
        db_dir = os.path.dirname(filename)
        if not os.path.exists(db_dir):
            os.makedirs(db_dir)
        f = shelve.open(filename, 'c')
    except Exception:
        raise ShelveError(
            'Database could not be opened, another livestreamer-curses instance might be already running. '
            'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
        )
    self.max_id = 0
    if init_stream_list:
        # seed the db and normalize the provided records
        f['streams'] = init_stream_list
        for i, s in enumerate(f['streams']):
            s['id'] = s.get('id') or i
            s['seen'] = s.get('seen') or 0
            s['last_seen'] = s.get('last_seen') or 0
            self.max_id = i
        f.sync()
    # Sort streams by view count
    try:
        self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
        for s in self.streams:
            # Max id, needed when adding a new stream
            self.max_id = max(self.max_id, s['id'])
            s['online'] = 2
        if list_streams:
            print(json.dumps(self.streams))
            f.close()
            sys.exit(0)
    # was a bare `except:` which also caught the SystemExit raised by
    # sys.exit(0) above, silently breaking the list_streams mode
    except Exception:
        self.streams = []
    self.db_was_read = True
    self.filtered_streams = list(self.streams)
    self.filter = ''
    self.all_streams_offline = None
    self.show_offline_streams = False
    self.config = config
    TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
    self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
    self.cmd_index = 0
    self.cmd = self.cmd_list[self.cmd_index]
    self.last_autocheck = 0
    self.default_res = self.config.DEFAULT_RESOLUTION
    self.store = f
    self.store.sync()
    self.no_streams = self.streams == []
    self.no_stream_shown = self.no_streams
    self.q = ProcessList(StreamPlayer().play)
    self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
    """ Stop playing streams and sync storage """
    try:
        self.q.terminate()
        if self.db_was_read:
            self.store['cmd'] = self.cmd
            self.store['streams'] = self.streams
            self.store.close()
    except:
        # best-effort cleanup: the interpreter may already be shutting down
        pass
def __call__(self, s):
    """curses entry point: set up the interface, then run the event loop."""
    self.init(s)
    self.run()
def init(self, s):
    """ Initialize the text interface """
    # Hide cursor
    curses.curs_set(0)
    self.s = s
    self.s.keypad(1)
    self.set_screen_size()
    self.pads = {}
    self.offsets = {}
    self.init_help()
    self.init_streams_pad()
    self.current_pad = 'streams'
    self.set_title(TITLE_STRING)
    self.got_g = False
    # was signal.signal(28, ...): use the symbolic name rather than the
    # platform-specific signal number
    signal.signal(signal.SIGWINCH, self.resize)
    if self.config.CHECK_ONLINE_ON_START:
        self.check_online_streams()
    self.set_status('Ready')
def getheightwidth(self):
    """ getwidth() -> (int, int)

    Return the height and width of the console in characters
    https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
    try:
        # honor LINES/COLUMNS environment overrides first
        return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
    except KeyError:
        # NOTE(review): passes a str buffer to ioctl; Python 3 may require
        # a bytes buffer (b"\x00"*8) -- confirm on supported interpreters
        height, width = struct.unpack(
            "hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
        if not height:
            # fall back to the classic terminal size
            return 25, 80
        return height, width
def resize(self, signum, obj):
    """ handler for SIGWINCH """
    self.s.clear()
    # remember the cursor row so it survives the pad rebuild
    stream_cursor = self.pads['streams'].getyx()[0]
    for pad in self.pads.values():
        pad.clear()
    self.s.refresh()
    self.set_screen_size()
    self.set_title(TITLE_STRING)
    self.init_help()
    self.init_streams_pad()
    self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
    self.s.refresh()
    self.show()
def run(self):
    """ Main event loop """
    # Show stream list
    self.show_streams()
    while True:
        self.s.refresh()
        # See if any stream has ended
        self.check_stopped_streams()
        # Wait on stdin or on the streams output
        souts = self.q.get_stdouts()
        souts.append(sys.stdin)
        try:
            # 1 s timeout so stopped streams and timed checks are noticed
            (r, w, x) = select.select(souts, [], [], 1)
        except select.error:
            continue
        if not r:
            # select timed out: maybe run the periodic online check
            if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
            cur_time = int(time())
            time_delta = cur_time - self.last_autocheck
            if time_delta > self.config.CHECK_ONLINE_INTERVAL:
                self.check_online_streams()
                self.set_status('Next check at {0}'.format(
                    strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
                ))
            continue
        for fd in r:
            if fd != sys.stdin:
                # Set the new status line only if non-empty
                msg = fd.readline()
                if msg:
                    self.set_status(msg[:-1])
            else:
                # dispatch one keypress
                c = self.pads[self.current_pad].getch()
                if c == curses.KEY_UP or c == ord('k'):
                    self.move(-1)
                elif c == curses.KEY_DOWN or c == ord('j'):
                    self.move(1)
                elif c == ord('f'):
                    if self.current_pad == 'streams':
                        self.filter_streams()
                elif c == ord('F'):
                    if self.current_pad == 'streams':
                        self.clear_filter()
                elif c == ord('g'):
                    # 'gg' goes to the top: requires two consecutive presses
                    if self.got_g:
                        self.move(0, absolute=True)
                        self.got_g = False
                        continue
                    self.got_g = True
                elif c == ord('G'):
                    self.move(len(self.filtered_streams)-1, absolute=True)
                elif c == ord('q'):
                    if self.current_pad == 'streams':
                        self.q.terminate()
                        return
                    else:
                        self.show_streams()
                elif c == 27: # ESC
                    if self.current_pad != 'streams':
                        self.show_streams()
                # stream-management keys are ignored on the help screen
                if self.current_pad == 'help':
                    continue
                elif c == 10:
                    self.play_stream()
                elif c == ord('s'):
                    self.stop_stream()
                elif c == ord('c'):
                    self.reset_stream()
                elif c == ord('n'):
                    self.edit_stream('name')
                elif c == ord('r'):
                    self.edit_stream('res')
                elif c == ord('u'):
                    self.edit_stream('url')
                elif c == ord('l'):
                    self.show_commandline()
                elif c == ord('L'):
                    self.shift_commandline()
                elif c == ord('a'):
                    self.prompt_new_stream()
                elif c == ord('d'):
                    self.delete_stream()
                elif c == ord('o'):
                    self.show_offline_streams ^= True
                    self.refilter_streams()
                elif c == ord('O'):
                    self.check_online_streams()
                elif c == ord('h') or c == ord('?'):
                    self.show_help()
def overwrite_line(self, msg, attr=curses.A_NORMAL):
    """Replace the rest of the current screen line with *msg* in style *attr*."""
    self.s.clrtoeol()
    self.s.addstr(msg, attr)
    self.s.chgat(attr)
def set_title(self, msg):
    """ Set first header line text """
    self.s.move(0, 0)
    self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
    """ Set second head line text """
    self.s.move(1, 0)
    self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
    """ Set first footer line text """
    self.s.move(self.max_y-1, 0)
    if reverse:
        self.overwrite_line(msg, attr=curses.A_REVERSE)
    else:
        self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
    """Blank the first footer line."""
    self.s.move(self.max_y-1, 0)
    self.overwrite_line('')
def init_help(self):
    """Build the static help pad listing every key binding."""
    help_pad_length = 27 # there should be a neater way to do this
    h = curses.newpad(help_pad_length, self.pad_w)
    h.keypad(1)
    h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
    h.addstr( 2, 0, ' Enter : start stream')
    h.addstr( 3, 0, ' s : stop stream')
    h.addstr( 4, 0, ' r : change stream resolution')
    h.addstr( 5, 0, ' n : change stream name')
    h.addstr( 6, 0, ' u : change stream URL')
    h.addstr( 7, 0, ' c : reset stream view count')
    h.addstr( 8, 0, ' a : add stream')
    h.addstr( 9, 0, ' d : delete stream')
    h.addstr(11, 0, ' l : show command line')
    h.addstr(12, 0, ' L : cycle command line')
    h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
    h.addstr(17, 0, ' j/up : up one line')
    h.addstr(18, 0, ' k/down: down one line')
    h.addstr(19, 0, ' f : filter streams')
    h.addstr(20, 0, ' F : clear filter')
    h.addstr(21, 0, ' o : toggle offline streams')
    h.addstr(22, 0, ' O : check for online streams')
    h.addstr(23, 0, ' gg : go to top')
    h.addstr(24, 0, ' G : go to bottom')
    h.addstr(25, 0, ' h/? : show this help')
    h.addstr(26, 0, ' q : quit')
    self.pads['help'] = h
    self.offsets['help'] = 0
def show(self):
    """Redraw whichever pad is currently active."""
    funcs = {
        'streams' : self.show_streams,
        'help' : self.show_help
    }
    funcs[self.current_pad]()
def show_help(self):
    """ Redraw Help screen and wait for any input to leave """
    self.s.move(1,0)
    self.s.clrtobot()
    self.set_header('Help'.center(self.pad_w))
    self.set_footer(' ESC or \'q\' to return to main menu')
    self.s.refresh()
    self.current_pad = 'help'
    self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
    """ Create a curses pad and populate it with a line by stream """
    y = 0
    # at least one row so newpad never gets a zero height
    pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
    pad.keypad(1)
    for s in self.filtered_streams:
        pad.addstr(y, 0, self.format_stream_line(s))
        y += 1
    self.offsets['streams'] = 0
    pad.move(start_row, 0)
    if not self.no_stream_shown:
        pad.chgat(curses.A_REVERSE)
    self.pads['streams'] = pad
def show_streams(self):
    """Switch to the streams pad and redraw it, or show a hint screen
    when there is nothing to display."""
    self.s.move(1,0)
    self.s.clrtobot()
    self.current_pad = 'streams'
    if self.no_stream_shown:
        self.hide_streams_pad()
        if self.no_streams:
            self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
            self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
            self.s.addstr(8, 5, 'Hit \'?\' for help')
        elif self.all_streams_offline and not self.show_offline_streams:
            self.s.addstr(5, 5, 'All streams are currently offline')
            self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
            self.s.addstr(7, 5, 'Hit \'O\' to refresh')
            self.s.addstr(9, 5, 'Hit \'?\' for help')
        else:
            self.s.addstr(5, 5, 'No stream matches your filter')
            self.s.addstr(6, 5, 'Hit \'f\' to change filter')
            self.s.addstr(7, 5, 'Hit \'F\' to clear')
            self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
            self.s.addstr(10, 5, 'Hit \'?\' for help')
    else:
        # column headers matching format_stream_line's field widths
        idf = 'ID'.center(ID_FIELD_WIDTH)
        name = 'Name'.center(NAME_FIELD_WIDTH)
        res = 'Resolution'.center(RES_FIELD_WIDTH)
        views = 'Views'.center(VIEWS_FIELD_WIDTH)
        self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
    self.redraw_stream_footer()
    self.redraw_status()
    self.s.refresh()
    if not self.no_stream_shown:
        self.refresh_current_pad()
def hide_streams_pad(self):
    """Refresh the streams pad into a single screen cell so it disappears."""
    pad = self.pads.get('streams')
    if pad:
        pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
    """Repaint the active pad into the window area below the two header lines."""
    pad = self.pads[self.current_pad]
    pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
    """ Scroll the current pad

    direction : (int) move by one in the given direction
                -1 is up, 1 is down. If absolute is True,
                go to position direction.
                Behaviour is affected by cursor_line and scroll_only below
    absolute  : (bool)
    """
    # pad in this lists have the current line highlighted
    cursor_line = [ 'streams' ]
    # pads in this list will be moved screen-wise as opposed to line-wise
    # if absolute is set, will go all the way top or all the way down depending
    # on direction
    scroll_only = [ 'help' ]
    if not pad_name:
        pad_name = self.current_pad
    pad = self.pads[pad_name]
    if pad_name == 'streams' and self.no_streams:
        return
    (row, col) = pad.getyx()
    new_row = row
    offset = self.offsets[pad_name]
    new_offset = offset
    if pad_name in scroll_only:
        if absolute:
            if direction > 0:
                new_offset = pad.getmaxyx()[0] - self.pad_h + 1
            else:
                new_offset = 0
        else:
            if direction > 0:
                new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
            elif offset > 0:
                new_offset = max(0, offset - self.pad_h)
    else:
        if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
            # jump straight to a row, scrolling just enough to show it
            if direction < offset:
                new_offset = direction
            elif direction > offset + self.pad_h - 2:
                new_offset = direction - self.pad_h + 2
            new_row = direction
        else:
            # single step; scroll only when the cursor hits a window edge
            if direction == -1 and row > 0:
                if row == offset:
                    new_offset -= 1
                new_row = row-1
            elif direction == 1 and row < len(self.filtered_streams)-1:
                if row == offset + self.pad_h - 2:
                    new_offset += 1
                new_row = row+1
    if pad_name in cursor_line:
        # un-highlight the old row before moving
        pad.move(row, 0)
        pad.chgat(curses.A_NORMAL)
    self.offsets[pad_name] = new_offset
    pad.move(new_row, 0)
    if pad_name in cursor_line:
        pad.chgat(curses.A_REVERSE)
    if pad_name == 'streams':
        self.redraw_stream_footer()
    if refresh:
        self.refresh_current_pad()
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.set_title
|
python
|
def set_title(self, msg):
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
|
Set first header line text
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L388-L391
|
[
"def overwrite_line(self, msg, attr=curses.A_NORMAL):\n self.s.clrtoeol()\n self.s.addstr(msg, attr)\n self.s.chgat(attr)\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
    """Recompute terminal dimensions and derive the pad geometry.

    Two lines are reserved at the top (title + header) and two at the
    bottom (footer + status) of the screen.
    """
    rows, cols = self.getheightwidth()
    curses.resizeterm(rows, cols)
    self.pad_x = 0
    self.max_y = rows - 1
    self.max_x = cols - 1
    self.pad_h = rows - 3
    self.pad_w = cols - 2 * self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
    """Overwrite the rest of the current line with msg using attr."""
    self.s.clrtoeol()
    self.s.addstr(msg, attr)
    # Extend the attribute to the end of the line so the whole row,
    # not just the text, carries the style (e.g. full reverse-video bar).
    self.s.chgat(attr)
def set_header(self, msg):
    """Draw msg on the second screen row (the column-header line)."""
    self.s.move(1, 0)
    self.overwrite_line(msg, curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
    """Draw msg on the first footer line, reverse-video by default."""
    style = curses.A_REVERSE if reverse else curses.A_NORMAL
    self.s.move(self.max_y - 1, 0)
    self.overwrite_line(msg, attr=style)
def clear_footer(self):
    """Blank out the first footer line."""
    self.s.move(self.max_y-1, 0)
    self.overwrite_line('')
def init_help(self):
    """Build the (scrollable) help pad from a static table of entries.

    Each entry is (row, text, attr); attr is None for plain text.
    """
    entries = [
        (0, 'STREAM MANAGEMENT', curses.A_BOLD),
        (2, ' Enter : start stream', None),
        (3, ' s : stop stream', None),
        (4, ' r : change stream resolution', None),
        (5, ' n : change stream name', None),
        (6, ' u : change stream URL', None),
        (7, ' c : reset stream view count', None),
        (8, ' a : add stream', None),
        (9, ' d : delete stream', None),
        (11, ' l : show command line', None),
        (12, ' L : cycle command line', None),
        (15, 'NAVIGATION', curses.A_BOLD),
        (17, ' j/up : up one line', None),
        (18, ' k/down: down one line', None),
        (19, ' f : filter streams', None),
        (20, ' F : clear filter', None),
        (21, ' o : toggle offline streams', None),
        (22, ' O : check for online streams', None),
        (23, ' gg : go to top', None),
        (24, ' G : go to bottom', None),
        (25, ' h/? : show this help', None),
        (26, ' q : quit', None),
    ]
    help_pad_length = 27  # there should be a neater way to do this
    h = curses.newpad(help_pad_length, self.pad_w)
    h.keypad(1)
    for row, text, attr in entries:
        if attr is None:
            h.addstr(row, 0, text)
        else:
            h.addstr(row, 0, text, attr)
    self.pads['help'] = h
    self.offsets['help'] = 0
def show(self):
    """Redraw whichever pad is currently active ('streams' or 'help')."""
    {
        'streams': self.show_streams,
        'help': self.show_help,
    }[self.current_pad]()
def show_help(self):
    """ Redraw Help screen and wait for any input to leave """
    # Wipe everything below the title line before drawing the help pad.
    self.s.move(1,0)
    self.s.clrtobot()
    self.set_header('Help'.center(self.pad_w))
    self.set_footer(' ESC or \'q\' to return to main menu')
    self.s.refresh()
    # Switch the active pad so the main loop routes keys to the help view.
    self.current_pad = 'help'
    self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
    """Create the streams pad, one formatted row per filtered stream.

    The pad cursor is parked on start_row and highlighted when at least
    one stream is shown.
    """
    pad = curses.newpad(max(1, len(self.filtered_streams)), self.pad_w)
    pad.keypad(1)
    for row, stream in enumerate(self.filtered_streams):
        pad.addstr(row, 0, self.format_stream_line(stream))
    self.offsets['streams'] = 0
    pad.move(start_row, 0)
    if not self.no_stream_shown:
        pad.chgat(curses.A_REVERSE)
    self.pads['streams'] = pad
def show_streams(self):
    """Redraw the main stream-list view (or a placeholder message)."""
    # Clear everything below the title and make the list the active pad.
    self.s.move(1,0)
    self.s.clrtobot()
    self.current_pad = 'streams'
    if self.no_stream_shown:
        # Nothing to list: hide the pad and print a context-dependent
        # placeholder instead.
        self.hide_streams_pad()
        if self.no_streams:
            self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
            self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
            self.s.addstr(8, 5, 'Hit \'?\' for help')
        elif self.all_streams_offline and not self.show_offline_streams:
            self.s.addstr(5, 5, 'All streams are currently offline')
            self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
            self.s.addstr(7, 5, 'Hit \'O\' to refresh')
            self.s.addstr(9, 5, 'Hit \'?\' for help')
        else:
            self.s.addstr(5, 5, 'No stream matches your filter')
            self.s.addstr(6, 5, 'Hit \'f\' to change filter')
            self.s.addstr(7, 5, 'Hit \'F\' to clear')
            self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
            self.s.addstr(10, 5, 'Hit \'?\' for help')
    else:
        # Column headers sized to the fixed field widths used by
        # format_stream_line.
        idf = 'ID'.center(ID_FIELD_WIDTH)
        name = 'Name'.center(NAME_FIELD_WIDTH)
        res = 'Resolution'.center(RES_FIELD_WIDTH)
        views = 'Views'.center(VIEWS_FIELD_WIDTH)
        self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
        self.redraw_stream_footer()
    self.redraw_status()
    self.s.refresh()
    if not self.no_stream_shown:
        self.refresh_current_pad()
def hide_streams_pad(self):
    """Collapse the streams pad to a 1x1 refresh so it disappears."""
    pad = self.pads.get('streams')
    if pad is not None:
        pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
    """Repaint the active pad at its current scroll offset."""
    name = self.current_pad
    self.pads[name].refresh(
        self.offsets[name], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
    """ Scroll the current pad
    direction : (int) move by one in the given direction
    -1 is up, 1 is down. If absolute is True,
    go to position direction.
    Behaviour is affected by cursor_line and scroll_only below
    absolute : (bool)
    """
    # pad in this lists have the current line highlighted
    cursor_line = [ 'streams' ]
    # pads in this list will be moved screen-wise as opposed to line-wise
    # if absolute is set, will go all the way top or all the way down depending
    # on direction
    scroll_only = [ 'help' ]
    if not pad_name:
        pad_name = self.current_pad
    pad = self.pads[pad_name]
    if pad_name == 'streams' and self.no_streams:
        return
    (row, col) = pad.getyx()
    new_row = row
    offset = self.offsets[pad_name]
    new_offset = offset
    if pad_name in scroll_only:
        # Page-wise movement, clamped to the pad's height.
        if absolute:
            if direction > 0:
                new_offset = pad.getmaxyx()[0] - self.pad_h + 1
            else:
                new_offset = 0
        else:
            if direction > 0:
                new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
            elif offset > 0:
                new_offset = max(0, offset - self.pad_h)
    else:
        # Line-wise movement with a highlighted cursor row; the offset
        # only changes when the cursor would leave the visible window.
        if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
            if direction < offset:
                new_offset = direction
            elif direction > offset + self.pad_h - 2:
                new_offset = direction - self.pad_h + 2
            new_row = direction
        else:
            if direction == -1 and row > 0:
                if row == offset:
                    new_offset -= 1
                new_row = row-1
            elif direction == 1 and row < len(self.filtered_streams)-1:
                if row == offset + self.pad_h - 2:
                    new_offset += 1
                new_row = row+1
    # Un-highlight the old row, move, then re-highlight the new one.
    if pad_name in cursor_line:
        pad.move(row, 0)
        pad.chgat(curses.A_NORMAL)
    self.offsets[pad_name] = new_offset
    pad.move(new_row, 0)
    if pad_name in cursor_line:
        pad.chgat(curses.A_REVERSE)
    if pad_name == 'streams':
        self.redraw_stream_footer()
    if refresh:
        self.refresh_current_pad()
def format_stream_line(self, stream):
    """Render one stream dict as a fixed-width table row.

    Columns: id, name, resolution, view count, status indicator.
    """
    idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
    name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
    res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
    views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
    # A live player process overrides the online-status indicator.
    # (Was `!= None`; identity comparison is the correct idiom.)
    if self.q.get_process(stream['id']) is not None:
        indicator = self.config.INDICATORS[4]  # playing
    else:
        indicator = self.config.INDICATORS[stream['online']]
    return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
    """ Redraw the highlighted line """
    if self.no_streams:
        return
    row = self.pads[self.current_pad].getyx()[0]
    s = self.filtered_streams[row]
    pad = self.pads['streams']
    # Rewrite the row's text, re-apply the highlight, then park the pad
    # cursor back on the row so getyx() keeps reporting the selection.
    pad.move(row, 0)
    pad.clrtoeol()
    pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
    pad.chgat(curses.A_REVERSE)
    pad.move(row, 0)
    self.refresh_current_pad()
def set_status(self, status):
    """Remember the status-line text and repaint it immediately."""
    self.status = status
    self.redraw_status()
def redraw_status(self):
    """Repaint the bottom status line, truncated to the screen width."""
    self.s.move(self.max_y, 0)
    self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
    self.s.refresh()
def redraw_stream_footer(self):
    """Show position, URL and resolution of the selected stream in the footer."""
    if not self.no_stream_shown:
        row, _ = self.pads[self.current_pad].getyx()
        stream = self.filtered_streams[row]
        footer = '{0}/{1} {2} {3}'.format(
            row + 1, len(self.filtered_streams), stream['url'], stream['res'])
        self.set_footer(footer)
        self.s.refresh()
def check_stopped_streams(self):
    """Report player processes that exited and reset their indicators."""
    finished = self.q.get_finished()
    for f in finished:
        for s in self.streams:
            # Only visible (filtered-in) streams have a pad row to update.
            try:
                i = self.filtered_streams.index(s)
            except ValueError:
                continue
            if f == s['id']:
                self.set_footer('Stream {0} has stopped'.format(s['name']))
                # Keep the highlight if the stopped stream is the
                # currently selected row.
                if i == self.pads[self.current_pad].getyx()[0]:
                    attr = curses.A_REVERSE
                else:
                    attr = curses.A_NORMAL
                # Replace the "playing" marker with the online-status one.
                self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
                    self.config.INDICATORS[s['online']], attr)
                self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
    """Probe every stream concurrently and update its 'online' status."""
    self.all_streams_offline = True
    self.set_status(' Checking online streams...')
    # Workers report completion here so the UI can show progress.
    done_queue = queue.Queue()
    def check_stream_managed(args):
        # NOTE(review): the second tuple element shadows the `queue`
        # module name and is unused; done_queue is taken from the
        # closure instead -- confirm intent.
        url, queue = args
        status = self._check_stream(url)
        done_queue.put(url)
        return status
    # NOTE(review): a closure can only be dispatched by a thread pool
    # (a process Pool could not pickle it) -- confirm Pool's import.
    pool = Pool(self.config.CHECK_ONLINE_THREADS)
    args = [(s['url'], done_queue) for s in self.streams]
    statuses = pool.map_async(check_stream_managed, args)
    n_streams = len(self.streams)
    # Poll for completion while refreshing the progress counter.
    while not statuses.ready():
        sleep(0.1)
        self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
        self.s.refresh()
    statuses = statuses.get()
    # map_async preserves input order, so statuses[i] matches streams[i].
    for i, s in enumerate(self.streams):
        s['online'] = statuses[i]
        if s['online']:
            self.all_streams_offline = False
    self.refilter_streams()
    self.last_autocheck = int(time())
    pool.close()
def prompt_input(self, prompt=''):
    """Read one line of user input on the status line and return it."""
    self.s.move(self.max_y, 0)
    self.s.clrtoeol()
    self.s.addstr(prompt)
    # Temporarily show the cursor and echo keystrokes while typing.
    curses.curs_set(1)
    curses.echo()
    r = self.s.getstr().decode()
    curses.noecho()
    curses.curs_set(0)
    # Wipe the prompt before returning to normal display.
    self.s.move(self.max_y, 0)
    self.s.clrtoeol()
    return r
def prompt_confirmation(self, prompt='', def_yes=False):
    """Ask a yes/no question on the footer line and return the answer.

    Any key other than 'y' or 'n' yields def_yes (shown as the
    bracketed default in the hint).
    """
    hint = '[y]/n' if def_yes else 'y/[n]'
    self.s.move(self.max_y-1, 0)
    self.s.clrtoeol()
    self.s.addstr('{0} {1} '.format(prompt, hint))
    curses.curs_set(1)
    curses.echo()
    key = self.s.getch()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(self.max_y-1, 0)
    self.s.clrtoeol()
    if key == ord('y'):
        return True
    if key == ord('n'):
        return False
    return def_yes
def sync_store(self):
    """Write the in-memory stream list back to the store and flush it."""
    self.store['streams'] = self.streams
    self.store.sync()
def bump_stream(self, stream, throttle=False):
    """Increment a stream's view counter and record when it was started.

    With throttle=True the bump is skipped when the stream was already
    started less than a minute ago.
    """
    now = int(time())
    if throttle and now - stream['last_seen'] < 60:
        return
    stream['seen'] += 1
    stream['last_seen'] = now
    self.sync_store()
def find_stream(self, sel, key='id'):
    """Return the first stream whose `key` field equals sel, or None."""
    return next((s for s in self.streams if s[key] == sel), None)
def clear_filter(self):
    """Drop the current search term and rebuild the visible list."""
    self.filter = ''
    self.refilter_streams()
def filter_streams(self):
    """Prompt for a search term and rebuild the visible list."""
    # Matching is case-insensitive, so store the lowercased form.
    self.filter = self.prompt_input('Filter: ').lower()
    self.refilter_streams()
def refilter_streams(self, quiet=False):
    """Recompute the visible stream list from the current filter.

    A stream is shown when it passes the offline toggle and the filter
    matches its name or URL; the result is ordered by view count.
    With quiet=True the status line is left untouched.
    """
    def visible(s):
        if not (self.show_offline_streams or s['online'] in (1, 2)):
            return False
        return (self.filter in s['name'].lower()
                or self.filter in s['url'].lower())
    self.filtered_streams = sorted(
        (s for s in self.streams if visible(s)),
        key=lambda s: s['seen'], reverse=True)
    self.no_stream_shown = not self.filtered_streams
    if not quiet:
        self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
            self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
            '' if self.show_offline_streams else 'NOT')
    self.init_streams_pad()
    self.redraw_stream_footer()
    self.show_streams()
    self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
    """Add a new stream, or bump the existing one with the same URL.

    res may be a fixed resolution string, a {url-substring: resolution}
    dict, or a callable url -> resolution; otherwise the configured
    default (and ultimately DEFAULT_RESOLUTION_HARD) is used.
    With bump=True a brand-new stream starts with one view.
    """
    ex_stream = self.find_stream(url, key='url')
    if ex_stream:
        if bump:
            self.bump_stream(ex_stream)
    else:
        if bump:
            seen = 1
            last_seen = int(time())
        else:
            seen = last_seen = 0
        # BUGFIX: always advance max_id. The old code assigned id 1
        # whenever the list was empty without touching max_id, so the
        # very next addition also got max_id + 1 == 1 (duplicate ids).
        self.max_id += 1
        idf = self.max_id
        s_res = res or self.default_res
        if isinstance(s_res, str):
            actual_res = s_res
        elif isinstance(s_res, dict):
            # First dict key found inside the URL wins.
            actual_res = DEFAULT_RESOLUTION_HARD
            for k, v in s_res.items():
                if k in url:
                    actual_res = v
                    break
        elif callable(s_res):
            actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
        else:
            actual_res = DEFAULT_RESOLUTION_HARD
        self.set_status(' Checking if new stream is online...')
        self.s.refresh()
        online = self._check_stream(url)
        new_stream = {
            'id' : idf,
            'name' : name,
            'seen' : seen,
            'last_seen' : last_seen,
            'res' : actual_res,
            'url' : url,
            'online' : online
        }
        self.streams.append(new_stream)
        self.no_streams = False
        self.refilter_streams()
        self.sync_store()
def delete_stream(self):
    """Delete the highlighted stream after user confirmation."""
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
        return
    # Remove from both the visible list and the backing list, then
    # drop the pad row and persist.
    self.filtered_streams.remove(s)
    self.streams.remove(s)
    pad.deleteln()
    self.sync_store()
    if not self.streams:
        self.no_streams = True
    if not self.filtered_streams:
        self.no_stream_shown = True
    # If the cursor fell past the last remaining row, step it back up.
    if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
        self.move(-1, refresh=False)
    pad.chgat(curses.A_REVERSE)
    self.redraw_current_line()
    self.show_streams()
def reset_stream(self):
    """Zero the view counter of the highlighted stream after confirmation."""
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    stream = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Reset stream {0}?'.format(stream['name'])):
        return
    stream['seen'] = 0
    stream['last_seen'] = 0
    self.redraw_current_line()
    self.sync_store()
def edit_stream(self, attr):
    """Prompt for and store a new value for one field of the selected stream.

    attr: one of 'name', 'url', 'res'.
    """
    prompt_info = {
        'name' : 'Name',
        'url' : 'URL',
        'res' : 'Resolution'
    }
    # NOTE(review): this guards on no_streams, while similar actions
    # (play/stop/reset) guard on no_stream_shown; with an active filter
    # hiding every stream the index below could hit an empty list --
    # confirm.
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
    if new_val != '':
        s[attr] = new_val
    # Redraw even on cancel: the prompt overwrote the footer/status lines.
    self.redraw_current_line()
    self.redraw_status()
    self.redraw_stream_footer()
def show_commandline(self):
    """Display the active player command line and its index in the footer."""
    footer = '{0}/{1} {2}'.format(
        self.cmd_index + 1, len(self.cmd_list), ' '.join(self.cmd))
    self.set_footer(footer)
def shift_commandline(self):
    """Cycle to the next configured player command line and show it."""
    self.cmd_index = (self.cmd_index + 1) % len(self.cmd_list)
    self.cmd = self.cmd_list[self.cmd_index]
    self.show_commandline()
def prompt_new_stream(self):
    """Ask for a URL and add it; the stream name is the last URL segment.

    An empty URL (or one ending in '/') yields an empty name and cancels.
    """
    url = self.prompt_input('New stream URL (empty to cancel): ')
    name = url.rsplit('/', 1)[-1]
    if name:
        self.add_stream(name, url)
        self.move(len(self.filtered_streams) - 1, absolute=True, refresh=False)
        self.show_streams()
def play_stream(self):
    """Launch the player for the highlighted stream and bump its counter.

    Spawn failures are reported on the footer line; any other exception
    propagates to the caller.
    """
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    try:
        self.q.put(s, self.cmd)
        self.bump_stream(s, throttle=True)
        self.redraw_current_line()
        self.refresh_current_pad()
    except QueueDuplicate:
        self.set_footer('This stream is already playing')
    except OSError as e:
        # BUGFIX: the old `type(e) == OSError` check never matched OSError
        # subclasses such as FileNotFoundError (what a missing player
        # binary raises on Python 3), so this message was unreachable and
        # the exception crashed the UI instead.
        self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
def stop_stream(self):
    """Terminate the player attached to the highlighted stream, if any."""
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    stream = self.filtered_streams[pad.getyx()[0]]
    if self.q.terminate_process(stream['id']):
        self.redraw_current_line()
        self.redraw_stream_footer()
        self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.set_header
|
python
|
def set_header(self, msg):
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
|
Set second head line text
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L393-L396
|
[
"def overwrite_line(self, msg, attr=curses.A_NORMAL):\n self.s.clrtoeol()\n self.s.addstr(msg, attr)\n self.s.chgat(attr)\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
""" Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
"""
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
    """Remove the highlighted stream (with confirmation) and persist.

    Also fixes up the cursor position and the empty-list flags afterwards.
    """
    # BUG FIX: guard on no_stream_shown rather than no_streams; with an
    # active filter, streams may exist while filtered_streams is empty,
    # and indexing filtered_streams below would raise IndexError.
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
        return
    self.filtered_streams.remove(s)
    self.streams.remove(s)
    pad.deleteln()
    self.sync_store()
    if not self.streams:
        self.no_streams = True
    if not self.filtered_streams:
        self.no_stream_shown = True
    # If the cursor fell off the end of the list, step back one line.
    if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
        self.move(-1, refresh=False)
    pad.chgat(curses.A_REVERSE)
    self.redraw_current_line()
    self.show_streams()
def reset_stream(self):
    """Zero out the view counters of the highlighted stream (with confirmation)."""
    if self.no_stream_shown:
        return
    row = self.pads[self.current_pad].getyx()[0]
    stream = self.filtered_streams[row]
    if not self.prompt_confirmation('Reset stream {0}?'.format(stream['name'])):
        return
    stream['seen'] = 0
    stream['last_seen'] = 0
    self.redraw_current_line()
    self.sync_store()
def edit_stream(self, attr):
    """Prompt for a new value for *attr* ('name', 'url' or 'res') on the
    highlighted stream.  An empty answer cancels the edit."""
    prompt_info = {
        'name': 'Name',
        'url': 'URL',
        'res': 'Resolution'
    }
    # BUG FIX: guard on no_stream_shown rather than no_streams; with an
    # active filter, streams may exist while filtered_streams is empty,
    # and indexing filtered_streams below would raise IndexError.
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
    if new_val != '':
        s[attr] = new_val
        # Persist immediately, consistent with delete_stream/reset_stream.
        self.sync_store()
    self.redraw_current_line()
    self.redraw_status()
    self.redraw_stream_footer()
def show_commandline(self):
    """Display the currently selected livestreamer command line in the footer."""
    position = '{0}/{1}'.format(self.cmd_index + 1, len(self.cmd_list))
    self.set_footer('{0} {1}'.format(position, ' '.join(self.cmd)))
def shift_commandline(self):
    """Advance to the next configured command line, wrapping around."""
    self.cmd_index = (self.cmd_index + 1) % len(self.cmd_list)
    self.cmd = self.cmd_list[self.cmd_index]
    self.show_commandline()
def prompt_new_stream(self):
    """Ask for a URL and add it as a new stream named after its last path segment."""
    url = self.prompt_input('New stream URL (empty to cancel): ')
    name = url.rsplit('/', 1)[-1]
    if not name:
        return
    self.add_stream(name, url)
    self.move(len(self.filtered_streams) - 1, absolute=True, refresh=False)
    self.show_streams()
def play_stream(self):
    """Launch the highlighted stream with the current command line.

    A duplicate-play attempt or a broken command line is reported in the
    footer; any other exception propagates.
    """
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    try:
        self.q.put(s, self.cmd)
        self.bump_stream(s, throttle=True)
        self.redraw_current_line()
        self.refresh_current_pad()
    # IMPROVED: dedicated except clauses instead of `type(e) ==` checks
    # inside a catch-all (isinstance semantics, no manual re-raise).
    except QueueDuplicate:
        self.set_footer('This stream is already playing')
    except OSError as e:
        # BUG FIX: the original literal contained the invalid escape
        # sequence '\ ' (DeprecationWarning on modern CPython); the
        # backslash is now escaped explicitly — same runtime text.
        self.set_footer('/!\\ Faulty command line: {0}'.format(e.strerror))
def stop_stream(self):
    """Terminate the player process of the highlighted stream, if one is running."""
    if self.no_stream_shown:
        return
    row = self.pads[self.current_pad].getyx()[0]
    stream = self.filtered_streams[row]
    if self.q.terminate_process(stream['id']):
        self.redraw_current_line()
        self.redraw_stream_footer()
        self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.set_footer
|
python
|
def set_footer(self, msg, reverse=True):
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
|
Set first footer line text
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L398-L404
|
[
"def overwrite_line(self, msg, attr=curses.A_NORMAL):\n self.s.clrtoeol()\n self.s.addstr(msg, attr)\n self.s.chgat(attr)\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
    """curses.wrapper entry point: set up the interface, then run the event loop."""
    self.init(s)
    self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
    """Blank the footer line (second line from the bottom)."""
    self.s.move(self.max_y - 1, 0)
    self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
    """Redraw whichever pad is currently active ('streams' or 'help')."""
    dispatch = {
        'streams': self.show_streams,
        'help': self.show_help,
    }
    dispatch[self.current_pad]()
def show_help(self):
    """Redraw the Help screen; any input returns to the main menu."""
    self.s.move(1, 0)
    self.s.clrtobot()
    self.set_header('Help'.center(self.pad_w))
    self.set_footer(" ESC or 'q' to return to main menu")
    self.s.refresh()
    self.current_pad = 'help'
    self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
    """Collapse the streams pad to a 1x1 area so it is effectively hidden."""
    pad = self.pads.get('streams')
    if pad is not None:
        pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
""" Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
"""
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
def format_stream_line(self, stream):
    """Render one stream as a fixed-width table row: id, name, res, views, status."""
    columns = [
        '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH),
        ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH - 2]).ljust(NAME_FIELD_WIDTH),
        ' {0}'.format(stream['res'][:RES_FIELD_WIDTH - 2]).ljust(RES_FIELD_WIDTH),
        '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH),
    ]
    # A live player process overrides the cached online indicator.
    if self.q.get_process(stream['id']) is not None:
        columns.append(self.config.INDICATORS[4])
    else:
        columns.append(self.config.INDICATORS[stream['online']])
    return '{0} {1} {2} {3} {4}'.format(*columns)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
    """Probe *url* with livestreamer.

    Returns 1 when at least one quality is available, 0 when the stream
    is resolvable but offline, and 3 when resolution/probing failed.
    """
    try:
        plugin = self.livestreamer.resolve_url(url)
        avail_streams = plugin.get_streams()
    except Exception:
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should map to status 3.
        return 3
    return 1 if avail_streams else 0
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
    """Ask a yes/no question on the footer line.

    Returns True for 'y', False for 'n', and *def_yes* for any other key.
    """
    hint = '[y]/n' if def_yes else 'y/[n]'
    self.s.move(self.max_y - 1, 0)
    self.s.clrtoeol()
    self.s.addstr('{0} {1} '.format(prompt, hint))
    # Temporarily show the cursor and echo while reading a single key.
    curses.curs_set(1)
    curses.echo()
    answer = self.s.getch()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(self.max_y - 1, 0)
    self.s.clrtoeol()
    if answer == ord('y'):
        return True
    if answer == ord('n'):
        return False
    return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
    """Increment the stream's view counter and persist.

    With throttle=True the counter is bumped at most once per minute,
    so repeated restarts do not inflate the count.
    """
    now = int(time())
    if throttle and now - stream['last_seen'] < 60:
        return
    stream['seen'] += 1
    stream['last_seen'] = now
    self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.show_help
|
python
|
def show_help(self):
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
|
Redraw Help screen and wait for any input to leave
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L450-L458
|
[
"def set_header(self, msg):\n \"\"\" Set second head line text \"\"\"\n self.s.move(1, 0)\n self.overwrite_line(msg, attr=curses.A_NORMAL)\n",
"def set_footer(self, msg, reverse=True):\n \"\"\" Set first footer line text \"\"\"\n self.s.move(self.max_y-1, 0)\n if reverse:\n self.overwrite_line(msg, attr=curses.A_REVERSE)\n else:\n self.overwrite_line(msg, attr=curses.A_NORMAL)\n",
"def refresh_current_pad(self):\n pad = self.pads[self.current_pad]\n pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
    """ getwidth() -> (int, int)
    Return the height and width of the console in characters
    https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
    try:
        # Honour explicit LINES/COLUMNS overrides first.
        return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
    except KeyError:
        # Ask the terminal driver: TIOCGWINSZ fills a struct winsize of
        # four shorts (rows, cols, xpixel, ypixel); we need the first two.
        height, width = struct.unpack(
            "hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
        if not height:
            # Some terminals report 0x0; fall back to the classic 80x25.
            return 25, 80
        return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
    # Clear from the cursor to the end of the line, write msg with the
    # given attribute, then extend that attribute to the rest of the line.
    self.s.clrtoeol()
    self.s.addstr(msg, attr)
    self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
    """Set first footer line text; highlighted unless reverse is False."""
    self.s.move(self.max_y - 1, 0)
    attr = curses.A_REVERSE if reverse else curses.A_NORMAL
    self.overwrite_line(msg, attr=attr)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
""" Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
"""
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.init_streams_pad
|
python
|
def init_streams_pad(self, start_row=0):
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
|
Create a curses pad and populate it with a line by stream
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L460-L472
|
[
"def format_stream_line(self, stream):\n idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)\n name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)\n res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)\n views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)\n p = self.q.get_process(stream['id']) != None\n if p:\n indicator = self.config.INDICATORS[4] # playing\n else:\n indicator = self.config.INDICATORS[stream['online']]\n return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We have need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
""" Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
"""
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.move
|
python
|
def move(self, direction, absolute=False, pad_name=None, refresh=True):
# pad in this lists have the current line highlighted
cursor_line = [ 'streams' ]
# pads in this list will be moved screen-wise as opposed to line-wise
# if absolute is set, will go all the way top or all the way down depending
# on direction
scroll_only = [ 'help' ]
if not pad_name:
pad_name = self.current_pad
pad = self.pads[pad_name]
if pad_name == 'streams' and self.no_streams:
return
(row, col) = pad.getyx()
new_row = row
offset = self.offsets[pad_name]
new_offset = offset
if pad_name in scroll_only:
if absolute:
if direction > 0:
new_offset = pad.getmaxyx()[0] - self.pad_h + 1
else:
new_offset = 0
else:
if direction > 0:
new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
elif offset > 0:
new_offset = max(0, offset - self.pad_h)
else:
if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
if direction < offset:
new_offset = direction
elif direction > offset + self.pad_h - 2:
new_offset = direction - self.pad_h + 2
new_row = direction
else:
if direction == -1 and row > 0:
if row == offset:
new_offset -= 1
new_row = row-1
elif direction == 1 and row < len(self.filtered_streams)-1:
if row == offset + self.pad_h - 2:
new_offset += 1
new_row = row+1
if pad_name in cursor_line:
pad.move(row, 0)
pad.chgat(curses.A_NORMAL)
self.offsets[pad_name] = new_offset
pad.move(new_row, 0)
if pad_name in cursor_line:
pad.chgat(curses.A_REVERSE)
if pad_name == 'streams':
self.redraw_stream_footer()
if refresh:
self.refresh_current_pad()
|
Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L516-L580
|
[
"def refresh_current_pad(self):\n pad = self.pads[self.current_pad]\n pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)\n",
"def redraw_stream_footer(self):\n if not self.no_stream_shown:\n row = self.pads[self.current_pad].getyx()[0]\n s = self.filtered_streams[row]\n self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))\n self.s.refresh()\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def format_stream_line(self, stream):
idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)
name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)
res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)
views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)
p = self.q.get_process(stream['id']) != None
if p:
indicator = self.config.INDICATORS[4] # playing
else:
indicator = self.config.INDICATORS[stream['online']]
return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
def set_status(self, status):
self.status = status
self.redraw_status()
def redraw_status(self):
self.s.move(self.max_y, 0)
self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
self.s.refresh()
def redraw_stream_footer(self):
if not self.no_stream_shown:
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
self.s.refresh()
def check_stopped_streams(self):
finished = self.q.get_finished()
for f in finished:
for s in self.streams:
try:
i = self.filtered_streams.index(s)
except ValueError:
continue
if f == s['id']:
self.set_footer('Stream {0} has stopped'.format(s['name']))
if i == self.pads[self.current_pad].getyx()[0]:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
self.config.INDICATORS[s['online']], attr)
self.refresh_current_pad()
def _check_stream(self, url):
try:
plugin = self.livestreamer.resolve_url(url)
avail_streams = plugin.get_streams()
if avail_streams:
return 1
return 0
except:
return 3
def check_online_streams(self):
self.all_streams_offline = True
self.set_status(' Checking online streams...')
done_queue = queue.Queue()
def check_stream_managed(args):
url, queue = args
status = self._check_stream(url)
done_queue.put(url)
return status
pool = Pool(self.config.CHECK_ONLINE_THREADS)
args = [(s['url'], done_queue) for s in self.streams]
statuses = pool.map_async(check_stream_managed, args)
n_streams = len(self.streams)
while not statuses.ready():
sleep(0.1)
self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
self.s.refresh()
statuses = statuses.get()
for i, s in enumerate(self.streams):
s['online'] = statuses[i]
if s['online']:
self.all_streams_offline = False
self.refilter_streams()
self.last_autocheck = int(time())
pool.close()
def prompt_input(self, prompt=''):
self.s.move(self.max_y, 0)
self.s.clrtoeol()
self.s.addstr(prompt)
curses.curs_set(1)
curses.echo()
r = self.s.getstr().decode()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y, 0)
self.s.clrtoeol()
return r
def prompt_confirmation(self, prompt='', def_yes=False):
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if def_yes:
hint = '[y]/n'
else:
hint = 'y/[n]'
self.s.addstr('{0} {1} '.format(prompt, hint))
curses.curs_set(1)
curses.echo()
r = self.s.getch()
curses.noecho()
curses.curs_set(0)
self.s.move(self.max_y-1, 0)
self.s.clrtoeol()
if r == ord('y'):
return True
elif r == ord('n'):
return False
else:
return def_yes
def sync_store(self):
self.store['streams'] = self.streams
self.store.sync()
def bump_stream(self, stream, throttle=False):
t = int(time())
# only bump if stream was last started some time ago
if throttle and t - stream['last_seen'] < 60*1:
return
stream['seen'] += 1
stream['last_seen'] = t
self.sync_store()
def find_stream(self, sel, key='id'):
for s in self.streams:
if s[key] == sel:
return s
return None
def clear_filter(self):
self.filter = ''
self.refilter_streams()
def filter_streams(self):
self.filter = self.prompt_input('Filter: ').lower()
self.refilter_streams()
def refilter_streams(self, quiet=False):
self.filtered_streams = []
for s in self.streams:
if ((self.show_offline_streams or s['online'] in [1,2])
and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
self.filtered_streams.append(s)
self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
self.no_stream_shown = len(self.filtered_streams) == 0
if not quiet:
self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
'' if self.show_offline_streams else 'NOT')
self.init_streams_pad()
self.redraw_stream_footer()
self.show_streams()
self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
ex_stream = self.find_stream(url, key='url')
if ex_stream:
if bump:
self.bump_stream(ex_stream)
else:
if bump:
seen = 1
last_seen = int(time())
else:
seen = last_seen = 0
if not self.streams:
idf = 1
else:
self.max_id += 1
idf = self.max_id
s_res = res or self.default_res
if type(s_res) == str:
actual_res = s_res
elif type(s_res) == dict:
actual_res = DEFAULT_RESOLUTION_HARD
for k,v in s_res.items():
if k in url:
actual_res = v
break
elif callable(s_res):
actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
else:
actual_res = DEFAULT_RESOLUTION_HARD
self.set_status(' Checking if new stream is online...')
self.s.refresh()
online = self._check_stream(url)
new_stream = {
'id' : idf,
'name' : name,
'seen' : seen,
'last_seen' : last_seen,
'res' : actual_res,
'url' : url,
'online' : online
}
self.streams.append(new_stream)
self.no_streams = False
self.refilter_streams()
self.sync_store()
def delete_stream(self):
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
return
self.filtered_streams.remove(s)
self.streams.remove(s)
pad.deleteln()
self.sync_store()
if not self.streams:
self.no_streams = True
if not self.filtered_streams:
self.no_stream_shown = True
if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
self.move(-1, refresh=False)
pad.chgat(curses.A_REVERSE)
self.redraw_current_line()
self.show_streams()
def reset_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
return
s['seen'] = 0
s['last_seen'] = 0
self.redraw_current_line()
self.sync_store()
def edit_stream(self, attr):
prompt_info = {
'name' : 'Name',
'url' : 'URL',
'res' : 'Resolution'
}
if self.no_streams:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
if new_val != '':
s[attr] = new_val
self.redraw_current_line()
self.redraw_status()
self.redraw_stream_footer()
def show_commandline(self):
self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
self.cmd_index += 1
if self.cmd_index == len(self.cmd_list):
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.show_commandline()
def prompt_new_stream(self):
url = self.prompt_input('New stream URL (empty to cancel): ')
name = url.split('/')[-1]
if name:
self.add_stream(name, url)
self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
self.show_streams()
def play_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
try:
self.q.put(s, self.cmd)
self.bump_stream(s, throttle=True)
self.redraw_current_line()
self.refresh_current_pad()
except Exception as e:
if type(e) == QueueDuplicate:
self.set_footer('This stream is already playing')
elif type(e) == OSError:
self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
else:
raise e
def stop_stream(self):
if self.no_stream_shown:
return
pad = self.pads[self.current_pad]
s = self.filtered_streams[pad.getyx()[0]]
p = self.q.terminate_process(s['id'])
if p:
self.redraw_current_line()
self.redraw_stream_footer()
self.redraw_status()
|
gapato/livestreamer-curses
|
src/livestreamer_curses/streamlist.py
|
StreamList.redraw_current_line
|
python
|
def redraw_current_line(self):
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad()
|
Redraw the highlighted line
|
train
|
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L594-L606
|
[
"def refresh_current_pad(self):\n pad = self.pads[self.current_pad]\n pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)\n",
"def format_stream_line(self, stream):\n idf = '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH)\n name = ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH)\n res = ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH)\n views = '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH)\n p = self.q.get_process(stream['id']) != None\n if p:\n indicator = self.config.INDICATORS[4] # playing\n else:\n indicator = self.config.INDICATORS[stream['online']]\n return '{0} {1} {2} {3} {4}'.format(idf, name, res, views, indicator)\n"
] |
class StreamList(object):
def __init__(self, filename, config, list_streams=False, init_stream_list=None):
""" Init and try to load a stream list, nothing about curses yet """
global TITLE_STRING
self.db_was_read = False
# Open the storage (create it if necessary)
try:
db_dir = os.path.dirname(filename)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
f = shelve.open(filename, 'c')
except Exception:
raise ShelveError(
'Database could not be opened, another livestreamer-curses instance might be already running. '
'Please note that a database created with Python 2.x cannot be used with Python 3.x and vice versa.'
)
self.max_id = 0
if init_stream_list:
f['streams'] = init_stream_list
for i, s in enumerate(f['streams']):
s['id'] = s.get('id') or i
s['seen'] = s.get('seen') or 0
s['last_seen'] = s.get('last_seen') or 0
self.max_id = i
f.sync()
# Sort streams by view count
try:
self.streams = sorted(f['streams'], key=lambda s:s['seen'], reverse=True)
for s in self.streams:
# Max id, needed when adding a new stream
self.max_id = max(self.max_id, s['id'])
s['online'] = 2
if list_streams:
print(json.dumps(self.streams))
f.close()
sys.exit(0)
except:
self.streams = []
self.db_was_read = True
self.filtered_streams = list(self.streams)
self.filter = ''
self.all_streams_offline = None
self.show_offline_streams = False
self.config = config
TITLE_STRING = TITLE_STRING.format(self.config.VERSION)
self.cmd_list = list(map(shlex.split, self.config.LIVESTREAMER_COMMANDS))
self.cmd_index = 0
self.cmd = self.cmd_list[self.cmd_index]
self.last_autocheck = 0
self.default_res = self.config.DEFAULT_RESOLUTION
self.store = f
self.store.sync()
self.no_streams = self.streams == []
self.no_stream_shown = self.no_streams
self.q = ProcessList(StreamPlayer().play)
self.livestreamer = livestreamer.Livestreamer()
def __del__(self):
""" Stop playing streams and sync storage """
try:
self.q.terminate()
if self.db_was_read:
self.store['cmd'] = self.cmd
self.store['streams'] = self.streams
self.store.close()
except:
pass
def __call__(self, s):
# Terminal initialization
self.init(s)
# Main event loop
self.run()
def init(self, s):
""" Initialize the text interface """
# Hide cursor
curses.curs_set(0)
self.s = s
self.s.keypad(1)
self.set_screen_size()
self.pads = {}
self.offsets = {}
self.init_help()
self.init_streams_pad()
self.current_pad = 'streams'
self.set_title(TITLE_STRING)
self.got_g = False
signal.signal(28, self.resize)
if self.config.CHECK_ONLINE_ON_START:
self.check_online_streams()
self.set_status('Ready')
def getheightwidth(self):
""" getwidth() -> (int, int)
Return the height and width of the console in characters
https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
if not height:
return 25, 80
return height, width
def resize(self, signum, obj):
""" handler for SIGWINCH """
self.s.clear()
stream_cursor = self.pads['streams'].getyx()[0]
for pad in self.pads.values():
pad.clear()
self.s.refresh()
self.set_screen_size()
self.set_title(TITLE_STRING)
self.init_help()
self.init_streams_pad()
self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
self.s.refresh()
self.show()
def run(self):
""" Main event loop """
# Show stream list
self.show_streams()
while True:
self.s.refresh()
# See if any stream has ended
self.check_stopped_streams()
# Wait on stdin or on the streams output
souts = self.q.get_stdouts()
souts.append(sys.stdin)
try:
(r, w, x) = select.select(souts, [], [], 1)
except select.error:
continue
if not r:
if self.config.CHECK_ONLINE_INTERVAL <= 0: continue
cur_time = int(time())
time_delta = cur_time - self.last_autocheck
if time_delta > self.config.CHECK_ONLINE_INTERVAL:
self.check_online_streams()
self.set_status('Next check at {0}'.format(
strftime('%H:%M:%S', localtime(time() + self.config.CHECK_ONLINE_INTERVAL))
))
continue
for fd in r:
if fd != sys.stdin:
# Set the new status line only if non-empty
msg = fd.readline()
if msg:
self.set_status(msg[:-1])
else:
# Main event loop
c = self.pads[self.current_pad].getch()
if c == curses.KEY_UP or c == ord('k'):
self.move(-1)
elif c == curses.KEY_DOWN or c == ord('j'):
self.move(1)
elif c == ord('f'):
if self.current_pad == 'streams':
self.filter_streams()
elif c == ord('F'):
if self.current_pad == 'streams':
self.clear_filter()
elif c == ord('g'):
if self.got_g:
self.move(0, absolute=True)
self.got_g = False
continue
self.got_g = True
elif c == ord('G'):
self.move(len(self.filtered_streams)-1, absolute=True)
elif c == ord('q'):
if self.current_pad == 'streams':
self.q.terminate()
return
else:
self.show_streams()
elif c == 27: # ESC
if self.current_pad != 'streams':
self.show_streams()
if self.current_pad == 'help':
continue
elif c == 10:
self.play_stream()
elif c == ord('s'):
self.stop_stream()
elif c == ord('c'):
self.reset_stream()
elif c == ord('n'):
self.edit_stream('name')
elif c == ord('r'):
self.edit_stream('res')
elif c == ord('u'):
self.edit_stream('url')
elif c == ord('l'):
self.show_commandline()
elif c == ord('L'):
self.shift_commandline()
elif c == ord('a'):
self.prompt_new_stream()
elif c == ord('d'):
self.delete_stream()
elif c == ord('o'):
self.show_offline_streams ^= True
self.refilter_streams()
elif c == ord('O'):
self.check_online_streams()
elif c == ord('h') or c == ord('?'):
self.show_help()
def set_screen_size(self):
""" Setup screen size and padding
We need 2 free lines at the top and 2 free lines at the bottom
"""
height, width = self.getheightwidth()
curses.resizeterm(height, width)
self.pad_x = 0
self.max_y, self.max_x = (height-1, width-1)
self.pad_h = height-3
self.pad_w = width-2*self.pad_x
def overwrite_line(self, msg, attr=curses.A_NORMAL):
self.s.clrtoeol()
self.s.addstr(msg, attr)
self.s.chgat(attr)
def set_title(self, msg):
""" Set first header line text """
self.s.move(0, 0)
self.overwrite_line(msg, curses.A_REVERSE)
def set_header(self, msg):
""" Set second head line text """
self.s.move(1, 0)
self.overwrite_line(msg, attr=curses.A_NORMAL)
def set_footer(self, msg, reverse=True):
""" Set first footer line text """
self.s.move(self.max_y-1, 0)
if reverse:
self.overwrite_line(msg, attr=curses.A_REVERSE)
else:
self.overwrite_line(msg, attr=curses.A_NORMAL)
def clear_footer(self):
self.s.move(self.max_y-1, 0)
self.overwrite_line('')
def init_help(self):
help_pad_length = 27 # there should be a neater way to do this
h = curses.newpad(help_pad_length, self.pad_w)
h.keypad(1)
h.addstr( 0, 0, 'STREAM MANAGEMENT', curses.A_BOLD)
h.addstr( 2, 0, ' Enter : start stream')
h.addstr( 3, 0, ' s : stop stream')
h.addstr( 4, 0, ' r : change stream resolution')
h.addstr( 5, 0, ' n : change stream name')
h.addstr( 6, 0, ' u : change stream URL')
h.addstr( 7, 0, ' c : reset stream view count')
h.addstr( 8, 0, ' a : add stream')
h.addstr( 9, 0, ' d : delete stream')
h.addstr(11, 0, ' l : show command line')
h.addstr(12, 0, ' L : cycle command line')
h.addstr(15, 0, 'NAVIGATION', curses.A_BOLD)
h.addstr(17, 0, ' j/up : up one line')
h.addstr(18, 0, ' k/down: down one line')
h.addstr(19, 0, ' f : filter streams')
h.addstr(20, 0, ' F : clear filter')
h.addstr(21, 0, ' o : toggle offline streams')
h.addstr(22, 0, ' O : check for online streams')
h.addstr(23, 0, ' gg : go to top')
h.addstr(24, 0, ' G : go to bottom')
h.addstr(25, 0, ' h/? : show this help')
h.addstr(26, 0, ' q : quit')
self.pads['help'] = h
self.offsets['help'] = 0
def show(self):
funcs = {
'streams' : self.show_streams,
'help' : self.show_help
}
funcs[self.current_pad]()
def show_help(self):
""" Redraw Help screen and wait for any input to leave """
self.s.move(1,0)
self.s.clrtobot()
self.set_header('Help'.center(self.pad_w))
self.set_footer(' ESC or \'q\' to return to main menu')
self.s.refresh()
self.current_pad = 'help'
self.refresh_current_pad()
def init_streams_pad(self, start_row=0):
""" Create a curses pad and populate it with a line by stream """
y = 0
pad = curses.newpad(max(1,len(self.filtered_streams)), self.pad_w)
pad.keypad(1)
for s in self.filtered_streams:
pad.addstr(y, 0, self.format_stream_line(s))
y+=1
self.offsets['streams'] = 0
pad.move(start_row, 0)
if not self.no_stream_shown:
pad.chgat(curses.A_REVERSE)
self.pads['streams'] = pad
def show_streams(self):
self.s.move(1,0)
self.s.clrtobot()
self.current_pad = 'streams'
if self.no_stream_shown:
self.hide_streams_pad()
if self.no_streams:
self.s.addstr(5, 5, 'It seems you don\'t have any stream yet')
self.s.addstr(6, 5, 'Hit \'a\' to add a new one')
self.s.addstr(8, 5, 'Hit \'?\' for help')
elif self.all_streams_offline and not self.show_offline_streams:
self.s.addstr(5, 5, 'All streams are currently offline')
self.s.addstr(6, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(7, 5, 'Hit \'O\' to refresh')
self.s.addstr(9, 5, 'Hit \'?\' for help')
else:
self.s.addstr(5, 5, 'No stream matches your filter')
self.s.addstr(6, 5, 'Hit \'f\' to change filter')
self.s.addstr(7, 5, 'Hit \'F\' to clear')
self.s.addstr(8, 5, 'Hit \'o\' to show offline streams')
self.s.addstr(10, 5, 'Hit \'?\' for help')
else:
idf = 'ID'.center(ID_FIELD_WIDTH)
name = 'Name'.center(NAME_FIELD_WIDTH)
res = 'Resolution'.center(RES_FIELD_WIDTH)
views = 'Views'.center(VIEWS_FIELD_WIDTH)
self.set_header('{0} {1} {2} {3} Status'.format(idf, name, res, views))
self.redraw_stream_footer()
self.redraw_status()
self.s.refresh()
if not self.no_stream_shown:
self.refresh_current_pad()
def hide_streams_pad(self):
pad = self.pads.get('streams')
if pad:
pad.refresh(0, 0, 2, 0, 2, 0)
def refresh_current_pad(self):
pad = self.pads[self.current_pad]
pad.refresh(self.offsets[self.current_pad], 0, 2, self.pad_x, self.pad_h, self.pad_w)
def move(self, direction, absolute=False, pad_name=None, refresh=True):
    """ Scroll the current pad
    direction : (int) move by one in the given direction
                -1 is up, 1 is down. If absolute is True,
                go to position direction.
                Behaviour is affected by cursor_line and scroll_only below
    absolute  : (bool)
    refresh   : (bool) repaint the pad after moving
    """
    # pad in this lists have the current line highlighted
    cursor_line = [ 'streams' ]
    # pads in this list will be moved screen-wise as opposed to line-wise
    # if absolute is set, will go all the way top or all the way down depending
    # on direction
    scroll_only = [ 'help' ]
    if not pad_name:
        pad_name = self.current_pad
    pad = self.pads[pad_name]
    if pad_name == 'streams' and self.no_streams:
        # Nothing to move over.
        return
    (row, col) = pad.getyx()
    new_row = row
    offset = self.offsets[pad_name]
    new_offset = offset
    if pad_name in scroll_only:
        # Page-wise scrolling, no cursor to track.
        if absolute:
            if direction > 0:
                # Jump to the last full page.
                new_offset = pad.getmaxyx()[0] - self.pad_h + 1
            else:
                new_offset = 0
        else:
            if direction > 0:
                # Page down, clamped to the bottom of the pad.
                new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
            elif offset > 0:
                # Page up, clamped to the top.
                new_offset = max(0, offset - self.pad_h)
    else:
        if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
            # Jump straight to row `direction`, scrolling just enough
            # to keep it visible in the window.
            if direction < offset:
                new_offset = direction
            elif direction > offset + self.pad_h - 2:
                new_offset = direction - self.pad_h + 2
            new_row = direction
        else:
            # Line-wise cursor movement; scroll when the cursor hits an edge.
            if direction == -1 and row > 0:
                if row == offset:
                    new_offset -= 1
                new_row = row-1
            elif direction == 1 and row < len(self.filtered_streams)-1:
                if row == offset + self.pad_h - 2:
                    new_offset += 1
                new_row = row+1
    if pad_name in cursor_line:
        # Un-highlight the previous cursor row.
        pad.move(row, 0)
        pad.chgat(curses.A_NORMAL)
    self.offsets[pad_name] = new_offset
    pad.move(new_row, 0)
    if pad_name in cursor_line:
        # Highlight the new cursor row.
        pad.chgat(curses.A_REVERSE)
    if pad_name == 'streams':
        self.redraw_stream_footer()
    if refresh:
        self.refresh_current_pad()
def format_stream_line(self, stream):
    """Render one stream dict as a fixed-width table row for the streams pad."""
    columns = [
        '{0} '.format(stream['id']).rjust(ID_FIELD_WIDTH),
        ' {0}'.format(stream['name'][:NAME_FIELD_WIDTH-2]).ljust(NAME_FIELD_WIDTH),
        ' {0}'.format(stream['res'][:RES_FIELD_WIDTH-2]).ljust(RES_FIELD_WIDTH),
        '{0} '.format(stream['seen']).rjust(VIEWS_FIELD_WIDTH),
    ]
    # A live player process takes precedence over the online/offline state.
    if self.q.get_process(stream['id']) != None:
        status_icon = self.config.INDICATORS[4]  # playing
    else:
        status_icon = self.config.INDICATORS[stream['online']]
    columns.append(status_icon)
    return '{0} {1} {2} {3} {4}'.format(*columns)
def set_status(self, status):
    """Store the status-bar text and repaint the status line."""
    self.status = status
    self.redraw_status()
def redraw_status(self):
    """Repaint the bottom status line, truncated to the screen width."""
    self.s.move(self.max_y, 0)
    self.overwrite_line(self.status[:self.max_x], curses.A_NORMAL)
    self.s.refresh()
def redraw_stream_footer(self):
    """Show position, URL and resolution of the selected stream in the footer."""
    if not self.no_stream_shown:
        # The pad's cursor row indexes into the filtered list.
        row = self.pads[self.current_pad].getyx()[0]
        s = self.filtered_streams[row]
        self.set_footer('{0}/{1} {2} {3}'.format(row+1, len(self.filtered_streams), s['url'], s['res']))
        self.s.refresh()
def check_stopped_streams(self):
    """Poll the process queue for players that exited and update their
    status indicator in the streams pad."""
    finished = self.q.get_finished()
    for f in finished:
        for s in self.streams:
            try:
                i = self.filtered_streams.index(s)
            except ValueError:
                # Stream is filtered out; nothing visible to update.
                continue
            if f == s['id']:
                self.set_footer('Stream {0} has stopped'.format(s['name']))
                # Keep the reverse-video highlight if this is the cursor row.
                if i == self.pads[self.current_pad].getyx()[0]:
                    attr = curses.A_REVERSE
                else:
                    attr = curses.A_NORMAL
                self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
                    self.config.INDICATORS[s['online']], attr)
                self.refresh_current_pad()
def _check_stream(self, url):
    """Probe a single stream URL.

    :param url: stream URL understood by livestreamer.
    :return: 1 if the URL resolves and has playable streams,
             0 if it resolves but nothing is available (offline),
             3 if resolution/probing raised (error state).
    """
    try:
        plugin = self.livestreamer.resolve_url(url)
        avail_streams = plugin.get_streams()
        if avail_streams:
            return 1
        return 0
    # Bug fix: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit, making Ctrl-C unreliable while streams are being
    # checked (this runs inside worker threads in check_online_streams).
    except Exception:
        return 3
def check_online_streams(self):
    """Probe every known stream in a thread pool and refresh the display.

    Updates each stream's 'online' field, tracks whether everything is
    offline, and records the time of this check in last_autocheck.
    """
    self.all_streams_offline = True
    self.set_status(' Checking online streams...')
    # Workers report completion here so we can show live progress.
    done_queue = queue.Queue()
    def check_stream_managed(args):
        # NOTE(review): the local name `queue` shadows the queue module and
        # is unused; done_queue is taken from the closure instead.
        url, queue = args
        status = self._check_stream(url)
        done_queue.put(url)
        return status
    pool = Pool(self.config.CHECK_ONLINE_THREADS)
    args = [(s['url'], done_queue) for s in self.streams]
    statuses = pool.map_async(check_stream_managed, args)
    n_streams = len(self.streams)
    # Busy-wait with a short sleep, updating the progress counter.
    while not statuses.ready():
        sleep(0.1)
        self.set_status(' Checked {0}/{1} streams...'.format(done_queue.qsize(), n_streams))
        self.s.refresh()
    statuses = statuses.get()
    # map_async preserves input order, so statuses[i] matches streams[i].
    for i, s in enumerate(self.streams):
        s['online'] = statuses[i]
        if s['online']:
            self.all_streams_offline = False
    self.refilter_streams()
    self.last_autocheck = int(time())
    pool.close()
def prompt_input(self, prompt=''):
    """Show *prompt* on the status line and read a line of user input.

    Temporarily enables the cursor and echo, then restores curses state
    and clears the line.  :return: the entered string (decoded).
    """
    self.s.move(self.max_y, 0)
    self.s.clrtoeol()
    self.s.addstr(prompt)
    curses.curs_set(1)
    curses.echo()
    r = self.s.getstr().decode()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(self.max_y, 0)
    self.s.clrtoeol()
    return r
def prompt_confirmation(self, prompt='', def_yes=False):
    """Ask a yes/no question on the line above the status bar.

    :param prompt: question text.
    :param def_yes: default answer returned for any key other than y/n.
    :return: True for 'y', False for 'n', otherwise *def_yes*.
    """
    self.s.move(self.max_y-1, 0)
    self.s.clrtoeol()
    # Brackets mark the default choice in the hint.
    if def_yes:
        hint = '[y]/n'
    else:
        hint = 'y/[n]'
    self.s.addstr('{0} {1} '.format(prompt, hint))
    curses.curs_set(1)
    curses.echo()
    r = self.s.getch()
    curses.noecho()
    curses.curs_set(0)
    self.s.move(self.max_y-1, 0)
    self.s.clrtoeol()
    if r == ord('y'):
        return True
    elif r == ord('n'):
        return False
    else:
        return def_yes
def sync_store(self):
    """Write the in-memory stream list back to the persistent store."""
    self.store['streams'] = self.streams
    self.store.sync()
def bump_stream(self, stream, throttle=False):
    """Increment a stream's view counter and persist the change.

    With throttle=True the bump is skipped when the stream was already
    bumped less than a minute ago, so quick restarts don't inflate counts.
    """
    now = int(time())
    recently_bumped = now - stream['last_seen'] < 60
    if throttle and recently_bumped:
        return
    stream['seen'] = stream['seen'] + 1
    stream['last_seen'] = now
    self.sync_store()
def find_stream(self, sel, key='id'):
    """Return the first stream whose *key* field equals *sel*, or None."""
    return next((entry for entry in self.streams if entry[key] == sel), None)
def clear_filter(self):
    """Drop the current filter string and rebuild the visible list."""
    self.filter = ''
    self.refilter_streams()
def filter_streams(self):
    """Prompt for a filter string (matched case-insensitively) and apply it."""
    self.filter = self.prompt_input('Filter: ').lower()
    self.refilter_streams()
def refilter_streams(self, quiet=False):
    """Rebuild filtered_streams from the filter string and the
    offline-visibility flag, then redraw the whole streams view.

    :param quiet: if True, do not overwrite the status line.
    """
    self.filtered_streams = []
    for s in self.streams:
        # online in [1,2]: keep streams that are up (or in a transitional
        # state); the filter matches name or URL, case-insensitively.
        if ((self.show_offline_streams or s['online'] in [1,2])
                and (self.filter in s['name'].lower() or self.filter in s['url'].lower())):
            self.filtered_streams.append(s)
    # Most-viewed streams first.
    self.filtered_streams.sort(key=lambda s:s['seen'], reverse=True)
    self.no_stream_shown = len(self.filtered_streams) == 0
    if not quiet:
        self.status = ' Filter: {0} ({1}/{2} matches, {3} showing offline streams)'.format(
            self.filter or '<empty>', len(self.filtered_streams), len(self.streams),
            '' if self.show_offline_streams else 'NOT')
    self.init_streams_pad()
    self.redraw_stream_footer()
    self.show_streams()
    self.redraw_status()
def add_stream(self, name, url, res=None, bump=False):
    """Register a stream, or bump an existing one with the same URL.

    :param name: display name.
    :param url: stream URL; uniqueness key.
    :param res: resolution override — a string, a {url-substring: res} dict,
        or a callable url -> res; falls back to self.default_res.
    :param bump: if True, count this call as a view.
    """
    ex_stream = self.find_stream(url, key='url')
    if ex_stream:
        # Already known: optionally bump, never duplicate.
        if bump:
            self.bump_stream(ex_stream)
    else:
        if bump:
            seen = 1
            last_seen = int(time())
        else:
            seen = last_seen = 0
        # NOTE(review): when the list is empty the id restarts at 1 without
        # touching max_id — presumably max_id is reset elsewhere; verify.
        if not self.streams:
            idf = 1
        else:
            self.max_id += 1
            idf = self.max_id
        s_res = res or self.default_res
        # Resolution spec can be a plain string, a substring->res mapping,
        # or a callable; anything else falls back to the hard default.
        if type(s_res) == str:
            actual_res = s_res
        elif type(s_res) == dict:
            actual_res = DEFAULT_RESOLUTION_HARD
            for k,v in s_res.items():
                if k in url:
                    actual_res = v
                    break
        elif callable(s_res):
            actual_res = s_res(url) or DEFAULT_RESOLUTION_HARD
        else:
            actual_res = DEFAULT_RESOLUTION_HARD
        self.set_status(' Checking if new stream is online...')
        self.s.refresh()
        online = self._check_stream(url)
        new_stream = {
            'id' : idf,
            'name' : name,
            'seen' : seen,
            'last_seen' : last_seen,
            'res' : actual_res,
            'url' : url,
            'online' : online
        }
        self.streams.append(new_stream)
        self.no_streams = False
        self.refilter_streams()
        self.sync_store()
def delete_stream(self):
    """Delete the stream under the cursor after user confirmation."""
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Delete stream {0}?'.format(s['name'])):
        return
    # Remove from both the visible and the full list, then persist.
    self.filtered_streams.remove(s)
    self.streams.remove(s)
    pad.deleteln()
    self.sync_store()
    if not self.streams:
        self.no_streams = True
    if not self.filtered_streams:
        self.no_stream_shown = True
    # If the cursor fell off the end of the shortened list, step back up
    # and re-highlight the new current row.
    if pad.getyx()[0] == len(self.filtered_streams) and not self.no_stream_shown:
        self.move(-1, refresh=False)
        pad.chgat(curses.A_REVERSE)
    self.redraw_current_line()
    self.show_streams()
def reset_stream(self):
    """Zero the view counter of the stream under the cursor (with confirmation)."""
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    if not self.prompt_confirmation('Reset stream {0}?'.format(s['name'])):
        return
    s['seen'] = 0
    s['last_seen'] = 0
    self.redraw_current_line()
    self.sync_store()
def edit_stream(self, attr):
    """Interactively edit one attribute of the stream under the cursor.

    :param attr: one of 'name', 'url', 'res'.
    """
    prompt_info = {
        'name' : 'Name',
        'url' : 'URL',
        'res' : 'Resolution'
    }
    if self.no_streams:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    new_val = self.prompt_input('{0} (empty to cancel): '.format(prompt_info[attr]))
    # Empty input cancels; the redraws below still run to restore the screen.
    if new_val != '':
        s[attr] = new_val
    self.redraw_current_line()
    self.redraw_status()
    self.redraw_stream_footer()
def show_commandline(self):
    """Display the active player command line (and its index) in the footer."""
    self.set_footer('{0}/{1} {2}'.format(self.cmd_index+1, len(self.cmd_list), ' '.join(self.cmd)))
def shift_commandline(self):
    """Cycle to the next player command line, wrapping at the end, and show it."""
    self.cmd_index = (self.cmd_index + 1) % len(self.cmd_list)
    self.cmd = self.cmd_list[self.cmd_index]
    self.show_commandline()
def prompt_new_stream(self):
    """Prompt for a URL and add it as a stream named after its last path segment."""
    url = self.prompt_input('New stream URL (empty to cancel): ')
    # Empty URL (or one ending in '/') yields an empty name and cancels.
    name = url.split('/')[-1]
    if name:
        self.add_stream(name, url)
        # Jump the cursor to the end of the list where new entries land.
        self.move(len(self.filtered_streams)-1, absolute=True, refresh=False)
        self.show_streams()
def play_stream(self):
    """Launch the player for the stream under the cursor.

    Queues the stream with the active command line, bumps its view count
    (throttled), and refreshes the display.  Duplicate launches and bad
    player commands are reported in the footer; anything else propagates.
    """
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    try:
        self.q.put(s, self.cmd)
        self.bump_stream(s, throttle=True)
        self.redraw_current_line()
        self.refresh_current_pad()
    except QueueDuplicate:
        self.set_footer('This stream is already playing')
    # Bug fix: the original compared type(e) == OSError, which missed
    # OSError subclasses such as FileNotFoundError (the usual error for a
    # bad player binary) and re-raised them instead of showing the footer.
    # Targeted except clauses also preserve the traceback for everything
    # else (the original's `raise e` rebuilt it).
    except OSError as e:
        self.set_footer('/!\ Faulty command line: {0}'.format(e.strerror))
def stop_stream(self):
    """Terminate the player process of the stream under the cursor, if any."""
    if self.no_stream_shown:
        return
    pad = self.pads[self.current_pad]
    s = self.filtered_streams[pad.getyx()[0]]
    p = self.q.terminate_process(s['id'])
    if p:
        # Only redraw when a process was actually running and got killed.
        self.redraw_current_line()
        self.redraw_stream_footer()
        self.redraw_status()
|
Synerty/peek-plugin-base
|
peek_plugin_base/server/PeekPlatformServerHttpHookABC.py
|
PeekPlatformServerHttpHookABC.addServerResource
|
python
|
def addServerResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
    """Serve a custom HTTP resource under this plugin's root resource.

    :param pluginSubPath: resource path (bytes) where the resource is served;
        leading and trailing slashes are stripped before registration.
    :param resource: the resource to serve.
    """
    # Normalise so b'/foo/' and b'foo' register under the same child key.
    pluginSubPath = pluginSubPath.strip(b'/')
    self.__rootServerResource.putChild(pluginSubPath, resource)
|
Add Server Resource
Add a custom implementation of a served HTTP resource.
:param pluginSubPath: The resource path where you want to serve this resource.
:param resource: The resource to serve.
:return: None
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PeekPlatformServerHttpHookABC.py#L30-L41
| null |
class PeekPlatformServerHttpHookABC(metaclass=ABCMeta):
""" Peek Platform Server HTTP Hook
The methods provided by this class apply to the HTTP service that provides
resources (vortex, etc) beween the server and the agent, worker and client.
These resources will not be availible to the web apps.
"""
def __init__(self):
self.__rootServerResource = FileUnderlayResource()
def addServerStaticResourceDir(self, dir: str) -> None:
""" Add Server Static Resource Directory
Calling this method sets up directory :code:`dir` to be served by the site.
:param dir: The file system directory to be served.
:return: None
"""
self.__rootServerResource.addFileSystemRoot(dir)
@property
def rootServerResource(self) -> BasicResource:
""" Server Root Resource
This returns the root site resource for this plugin.
"""
return self.__rootServerResource
|
Synerty/peek-plugin-base
|
peek_plugin_base/client/PluginClientEntryHookABC.py
|
PluginClientEntryHookABC.angularFrontendAppDir
|
python
|
def angularFrontendAppDir(self) -> str:
relDir = self._packageCfg.config.plugin.title(require_string)
dir = os.path.join(self._pluginRoot, relDir)
if not os.path.isdir(dir): raise NotADirectoryError(dir)
return dir
|
Angular Frontend Dir
This directory will be linked into the angular app when it is compiled.
:return: The absolute path of the Angular2 app directory.
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/client/PluginClientEntryHookABC.py#L31-L41
| null |
class PluginClientEntryHookABC(PluginCommonEntryHookABC):
def __init__(self, pluginName: str, pluginRootDir: str, platform: PeekClientPlatformHookABC):
PluginCommonEntryHookABC.__init__(self, pluginName=pluginName, pluginRootDir=pluginRootDir)
self._platform = platform
@property
def platform(self) -> PeekClientPlatformHookABC:
return self._platform
@property
def publishedClientApi(self) -> Optional[object]:
return None
@property
def angularMainModule(self) -> str:
""" Angular Main Module
:return: The name of the main module that the Angular2 router will lazy load.
"""
return self._angularMainModule
@property
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/AlembicEnvBase.py
|
AlembicEnvBase.run
|
python
|
def run(self):
connectable = engine_from_config(
self._config.get_section(self._config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
ensureSchemaExists(connectable, self._schemaName)
context.configure(
connection=connection,
target_metadata=self._targetMetadata,
include_object=self._includeObjectFilter,
include_schemas=True,
version_table_schema=self._schemaName
)
with context.begin_transaction():
context.run_migrations()
|
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/AlembicEnvBase.py#L42-L66
|
[
"def ensureSchemaExists(engine, schemaName):\n # Ensure the schema exists\n\n if isinstance(engine.dialect, MSDialect):\n if list(engine.execute(\"SELECT SCHEMA_ID('%s')\" % schemaName))[0][0] is None:\n engine.execute(\"CREATE SCHEMA [%s]\" % schemaName)\n\n elif isinstance(engine.dialect, PGDialect):\n engine.execute(\n 'CREATE SCHEMA IF NOT EXISTS \"%s\" ' % schemaName)\n\n else:\n raise Exception('unknown dialect %s' % engine.dialect)\n"
] |
class AlembicEnvBase:
def __init__(self, targetMetadata):
from peek_platform.util.LogUtil import setupPeekLogger
setupPeekLogger()
self._config = context.config
self._targetMetadata = targetMetadata
self._schemaName = targetMetadata.schema
def _includeObjectFilter(self, object, name, type_, reflected, compare_to):
# If it's not in this schema, don't include it
if hasattr(object, 'schema') and object.schema != self._schemaName:
return False
return True
def run(self):
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
self._config.get_section(self._config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
ensureSchemaExists(connectable, self._schemaName)
context.configure(
connection=connection,
target_metadata=self._targetMetadata,
include_object=self._includeObjectFilter,
include_schemas=True,
version_table_schema=self._schemaName
)
with context.begin_transaction():
context.run_migrations()
|
Synerty/peek-plugin-base
|
peek_plugin_base/server/PeekPlatformAdminHttpHookABC.py
|
PeekPlatformAdminHttpHookABC.addAdminResource
|
python
|
def addAdminResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
pluginSubPath = pluginSubPath.strip(b'/')
self.__rootAdminResource.putChild(pluginSubPath, resource)
|
Add Site Resource
Add a custom implementation of a served HTTP resource.
:param pluginSubPath: The resource path where you want to serve this resource.
:param resource: The resource to serve.
:return: None
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PeekPlatformAdminHttpHookABC.py#L31-L42
| null |
class PeekPlatformAdminHttpHookABC(metaclass=ABCMeta):
""" Peek Platform Site HTTP Hook
The methods provided by this class apply to the HTTP sites served by the
Client service for the mobile and desktop apps, and the Server service for the
admin app.
It is not the HTTP service that provides resources (vortex, etc) beween the server
and the agent, worker and client.
"""
def __init__(self):
self.__rootAdminResource = FileUnderlayResource()
def addAdminStaticResourceDir(self, dir: str) -> None:
""" Add Site Static Resource Directory
Calling this method sets up directory :code:`dir` to be served by the site.
:param dir: The file system directory to be served.
:return: None
"""
self.__rootAdminResource.addFileSystemRoot(dir)
@property
def rootAdminResource(self) -> BasicResource:
""" Site Root Resource
This returns the root site resource for this plugin.
"""
return self.__rootAdminResource
|
Synerty/peek-plugin-base
|
peek_plugin_base/client/PeekPlatformMobileHttpHookABC.py
|
PeekPlatformMobileHttpHookABC.addMobileResource
|
python
|
def addMobileResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
pluginSubPath = pluginSubPath.strip(b'/')
self.__rootMobileResource.putChild(pluginSubPath, resource)
|
Add Site Resource
Add a custom implementation of a served HTTP resource.
:param pluginSubPath: The resource path where you want to serve this resource.
:param resource: The resource to serve.
:return: None
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/client/PeekPlatformMobileHttpHookABC.py#L31-L42
| null |
class PeekPlatformMobileHttpHookABC(metaclass=ABCMeta):
""" Peek Platform Site HTTP Hook
The methods provided by this class apply to the HTTP sites served by the
Client service for the mobile and desktop apps, and the Server service for the
admin app.
It is not the HTTP service that provides resources (vortex, etc) beween the server
and the agent, worker and client.
"""
def __init__(self):
self.__rootMobileResource = FileUnderlayResource()
def addMobileStaticResourceDir(self, dir: str) -> None:
""" Add Site Static Resource Directory
Calling this method sets up directory :code:`dir` to be served by the site.
:param dir: The file system directory to be served.
:return: None
"""
self.__rootMobileResource.addFileSystemRoot(dir)
@property
def rootMobileResource(self) -> BasicResource:
""" Site Root Resource
This returns the root site resource for this plugin.
"""
return self.__rootMobileResource
|
Synerty/peek-plugin-base
|
peek_plugin_base/worker/CeleryDbConn.py
|
setConnStringForWindows
|
python
|
def setConnStringForWindows():
global _dbConnectString
from peek_platform.file_config.PeekFileConfigABC import PeekFileConfigABC
from peek_platform.file_config.PeekFileConfigSqlAlchemyMixin import \
PeekFileConfigSqlAlchemyMixin
from peek_platform import PeekPlatformConfig
class _WorkerTaskConfigMixin(PeekFileConfigABC,
PeekFileConfigSqlAlchemyMixin):
pass
PeekPlatformConfig.componentName = peekWorkerName
_dbConnectString = _WorkerTaskConfigMixin().dbConnectString
|
Set Conn String for Windows
Windows has a different way of forking processes, which causes the
@worker_process_init.connect signal not to work in "CeleryDbConnInit"
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/worker/CeleryDbConn.py#L20-L40
| null |
import logging
import platform
from threading import Lock
from typing import Iterable, Optional
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import sessionmaker
from peek_plugin_base.PeekVortexUtil import peekWorkerName
from peek_plugin_base.storage.DbConnection import _commonPrefetchDeclarativeIds
logger = logging.getLogger(__name__)

# Module-level connection state: populated lazily, one engine per worker fork.
_dbConnectString = None
__dbEngine = None
__ScopedSession = None

# Bug fix: `platform.system() is "Windows"` compared object *identity* and
# only ever worked by CPython string-interning accident; use equality.
_isWindows = platform.system() == "Windows"
# For celery, an engine is created per worker
def getDbEngine():
global __dbEngine
if _dbConnectString is None:
if _isWindows:
from peek_platform.ConfigCeleryApp import configureCeleryLogging
configureCeleryLogging()
setConnStringForWindows()
else:
msg = "CeleryDbConn initialisation error"
logger.error(msg)
raise Exception(msg)
if not __dbEngine:
__dbEngine = create_engine(
_dbConnectString,
echo=False,
pool_size=4, # This is per fork
max_overflow=10, # Number that the pool size can exceed when required
pool_timeout=20, # Timeout for getting conn from pool
pool_recycle=1200 # Reconnect?? after 10 minutes
)
return __dbEngine
def getDbSession():
global __ScopedSession
if not __ScopedSession:
__ScopedSession = scoped_session(sessionmaker(bind=getDbEngine()))
return __ScopedSession()
_sequenceMutex = Lock()
def prefetchDeclarativeIds(Declarative, count) -> Optional[Iterable[int]]:
""" Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
"""
return _commonPrefetchDeclarativeIds(
getDbEngine(), _sequenceMutex, Declarative, count
)
|
Synerty/peek-plugin-base
|
peek_plugin_base/worker/CeleryDbConn.py
|
prefetchDeclarativeIds
|
python
|
def prefetchDeclarativeIds(Declarative, count) -> Optional[Iterable[int]]:
return _commonPrefetchDeclarativeIds(
getDbEngine(), _sequenceMutex, Declarative, count
)
|
Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/worker/CeleryDbConn.py#L83-L102
|
[
"def _commonPrefetchDeclarativeIds(engine, mutex,\n Declarative, count) -> Optional[Iterable[int]]:\n \"\"\" Common Prefetch Declarative IDs\n\n This function is used by the worker and server\n \"\"\"\n if not count:\n logger.debug(\"Count was zero, no range returned\")\n return\n\n conn = engine.connect()\n transaction = conn.begin()\n mutex.acquire()\n try:\n sequence = Sequence('%s_id_seq' % Declarative.__tablename__,\n schema=Declarative.metadata.schema)\n\n if isPostGreSQLDialect(engine):\n sql = \"SELECT setval('%(seq)s', (select nextval('%(seq)s') + %(add)s), true)\"\n sql %= {\n 'seq': '\"%s\".\"%s\"' % (sequence.schema, sequence.name),\n 'add': count\n }\n nextStartId = conn.execute(sql).fetchone()[0]\n startId = nextStartId - count\n\n elif isMssqlDialect(engine):\n startId = conn.execute(\n 'SELECT NEXT VALUE FOR \"%s\".\"%s\"'\n % (sequence.schema, sequence.name)\n ).fetchone()[0] + 1\n\n nextStartId = startId + count\n\n conn.execute('alter sequence \"%s\".\"%s\" restart with %s'\n % (sequence.schema, sequence.name, nextStartId))\n\n else:\n raise NotImplementedError()\n\n transaction.commit()\n\n return iter(range(startId, nextStartId))\n\n finally:\n mutex.release()\n conn.close()\n",
"def getDbEngine():\n global __dbEngine\n\n if _dbConnectString is None:\n if _isWindows:\n from peek_platform.ConfigCeleryApp import configureCeleryLogging\n configureCeleryLogging()\n setConnStringForWindows()\n\n else:\n msg = \"CeleryDbConn initialisation error\"\n logger.error(msg)\n raise Exception(msg)\n\n if not __dbEngine:\n __dbEngine = create_engine(\n _dbConnectString,\n echo=False,\n pool_size=4, # This is per fork\n max_overflow=10, # Number that the pool size can exceed when required\n pool_timeout=20, # Timeout for getting conn from pool\n pool_recycle=1200 # Reconnect?? after 10 minutes\n )\n\n return __dbEngine\n"
] |
import logging
import platform
from threading import Lock
from typing import Iterable, Optional
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import sessionmaker
from peek_plugin_base.PeekVortexUtil import peekWorkerName
from peek_plugin_base.storage.DbConnection import _commonPrefetchDeclarativeIds
logger = logging.getLogger(__name__)

# Module-level connection state: populated lazily, one engine per worker fork.
_dbConnectString = None
__dbEngine = None
__ScopedSession = None

# Bug fix: `platform.system() is "Windows"` compared object *identity* and
# only ever worked by CPython string-interning accident; use equality.
_isWindows = platform.system() == "Windows"
def setConnStringForWindows():
    """ Set Conn String for Windows

    Windows has a different way of forking processes, which causes the
    @worker_process_init.connect signal not to work in "CeleryDbConnInit",
    so the connection string is loaded directly from the worker's config file.
    """
    global _dbConnectString
    # Imports are deferred: peek_platform is only importable once the worker
    # environment is set up.
    from peek_platform.file_config.PeekFileConfigABC import PeekFileConfigABC
    from peek_platform.file_config.PeekFileConfigSqlAlchemyMixin import \
        PeekFileConfigSqlAlchemyMixin
    from peek_platform import PeekPlatformConfig
    # Minimal config class combining the base config with the SQLAlchemy mixin.
    class _WorkerTaskConfigMixin(PeekFileConfigABC,
                                 PeekFileConfigSqlAlchemyMixin):
        pass
    PeekPlatformConfig.componentName = peekWorkerName
    _dbConnectString = _WorkerTaskConfigMixin().dbConnectString
# For celery, an engine is created per worker
def getDbEngine():
global __dbEngine
if _dbConnectString is None:
if _isWindows:
from peek_platform.ConfigCeleryApp import configureCeleryLogging
configureCeleryLogging()
setConnStringForWindows()
else:
msg = "CeleryDbConn initialisation error"
logger.error(msg)
raise Exception(msg)
if not __dbEngine:
__dbEngine = create_engine(
_dbConnectString,
echo=False,
pool_size=4, # This is per fork
max_overflow=10, # Number that the pool size can exceed when required
pool_timeout=20, # Timeout for getting conn from pool
pool_recycle=1200 # Reconnect?? after 10 minutes
)
return __dbEngine
def getDbSession():
global __ScopedSession
if not __ScopedSession:
__ScopedSession = scoped_session(sessionmaker(bind=getDbEngine()))
return __ScopedSession()
_sequenceMutex = Lock()
|
Synerty/peek-plugin-base
|
peek_plugin_base/client/PeekPlatformDesktopHttpHookABC.py
|
PeekPlatformDesktopHttpHookABC.addDesktopResource
|
python
|
def addDesktopResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
pluginSubPath = pluginSubPath.strip(b'/')
self.__rootDesktopResource.putChild(pluginSubPath, resource)
|
Add Site Resource
Add a custom implementation of a served HTTP resource.
:param pluginSubPath: The resource path where you want to serve this resource.
:param resource: The resource to serve.
:return: None
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/client/PeekPlatformDesktopHttpHookABC.py#L31-L42
| null |
class PeekPlatformDesktopHttpHookABC(metaclass=ABCMeta):
""" Peek Platform Site HTTP Hook
The methods provided by this class apply to the HTTP sites served by the
Client service for the mobile and desktop apps, and the Server service for the
admin app.
It is not the HTTP service that provides resources (vortex, etc) beween the server
and the agent, worker and client.
"""
def __init__(self):
self.__rootDesktopResource = FileUnderlayResource()
def addDesktopStaticResourceDir(self, dir: str) -> None:
""" Add Site Static Resource Directory
Calling this method sets up directory :code:`dir` to be served by the site.
:param dir: The file system directory to be served.
:return: None
"""
self.__rootDesktopResource.addFileSystemRoot(dir)
@property
def rootDesktopResource(self) -> BasicResource:
""" Site Root Resource
This returns the root site resource for this plugin.
"""
return self.__rootDesktopResource
|
Synerty/peek-plugin-base
|
peek_plugin_base/server/PluginServerStorageEntryHookABC.py
|
PluginServerStorageEntryHookABC._migrateStorageSchema
|
python
|
def _migrateStorageSchema(self, metadata: MetaData) -> None:
relDir = self._packageCfg.config.storage.alembicDir(require_string)
alembicDir = os.path.join(self.rootDir, relDir)
if not os.path.isdir(alembicDir): raise NotADirectoryError(alembicDir)
self._dbConn = DbConnection(
dbConnectString=self.platform.dbConnectString,
metadata=metadata,
alembicDir=alembicDir,
enableCreateAll=False
)
self._dbConn.migrate()
|
Initialise the DB
This method is called by the platform between the load() and start() calls.
There should be no need for a plugin to call this method it's self.
:param metadata: the SQLAlchemy metadata for this plugins schema
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PluginServerStorageEntryHookABC.py#L15-L36
| null |
class PluginServerStorageEntryHookABC(metaclass=ABCMeta):
@property
def dbSessionCreator(self) -> DbSessionCreator:
""" Database Session
This is a helper property that can be used by the papp to get easy access to
the SQLAlchemy C{Session}
:return: An instance of the sqlalchemy ORM session
"""
return self._dbConn.ormSessionCreator
@property
def dbEngine(self) -> Engine:
""" DB Engine
This is a helper property that can be used by the papp to get easy access to
the SQLAlchemy C{Engine}
:return: The instance of the database engine for this plugin
"""
return self._dbConn._dbEngine
def prefetchDeclarativeIds(self, Declarative, count) -> Deferred:
""" Get PG Sequence Generator
A PostGreSQL sequence generator returns a chunk of IDs for the given
declarative.
:return: A generator that will provide the IDs
:rtype: an iterator, yielding the numbers to assign
"""
return self._dbConn.prefetchDeclarativeIds(Declarative=Declarative, count=count)
@abstractproperty
def dbMetadata(self) -> MetaData:
""" DB Metadata
This property returns an instance to the metadata from the ORM Declarative
on which, all the ORM classes have inherited.
This means the metadata knows about all the tables.
NOTE: The plugin must be constructed with a schema matching the plugin package
:return: The instance of the metadata for this plugin.
Example from peek_plugin_noop.storage.DeclarativeBase.py
--------------------------------------------------------
::
metadata = MetaData(schema="noop")
DeclarativeBase = declarative_base(metadata=metadata)
"""
pass
|
Synerty/peek-plugin-base
|
peek_plugin_base/server/PluginServerStorageEntryHookABC.py
|
PluginServerStorageEntryHookABC.prefetchDeclarativeIds
|
python
|
def prefetchDeclarativeIds(self, Declarative, count) -> Deferred:
return self._dbConn.prefetchDeclarativeIds(Declarative=Declarative, count=count)
|
Get PG Sequence Generator
A PostGreSQL sequence generator returns a chunk of IDs for the given
declarative.
:return: A generator that will provide the IDs
:rtype: an iterator, yielding the numbers to assign
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PluginServerStorageEntryHookABC.py#L62-L72
| null |
class PluginServerStorageEntryHookABC(metaclass=ABCMeta):
def _migrateStorageSchema(self, metadata: MetaData) -> None:
""" Initialise the DB
This method is called by the platform between the load() and start() calls.
There should be no need for a plugin to call this method it's self.
:param metadata: the SQLAlchemy metadata for this plugins schema
"""
relDir = self._packageCfg.config.storage.alembicDir(require_string)
alembicDir = os.path.join(self.rootDir, relDir)
if not os.path.isdir(alembicDir): raise NotADirectoryError(alembicDir)
self._dbConn = DbConnection(
dbConnectString=self.platform.dbConnectString,
metadata=metadata,
alembicDir=alembicDir,
enableCreateAll=False
)
self._dbConn.migrate()
@property
def dbSessionCreator(self) -> DbSessionCreator:
""" Database Session
This is a helper property that can be used by the papp to get easy access to
the SQLAlchemy C{Session}
:return: An instance of the sqlalchemy ORM session
"""
return self._dbConn.ormSessionCreator
@property
def dbEngine(self) -> Engine:
""" DB Engine
This is a helper property that can be used by the papp to get easy access to
the SQLAlchemy C{Engine}
:return: The instance of the database engine for this plugin
"""
return self._dbConn._dbEngine
@abstractproperty
def dbMetadata(self) -> MetaData:
""" DB Metadata
This property returns an instance to the metadata from the ORM Declarative
on which, all the ORM classes have inherited.
This means the metadata knows about all the tables.
NOTE: The plugin must be constructed with a schema matching the plugin package
:return: The instance of the metadata for this plugin.
Example from peek_plugin_noop.storage.DeclarativeBase.py
--------------------------------------------------------
::
metadata = MetaData(schema="noop")
DeclarativeBase = declarative_base(metadata=metadata)
"""
pass
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/DbConnection.py
|
_commonPrefetchDeclarativeIds
|
python
|
def _commonPrefetchDeclarativeIds(engine, mutex,
Declarative, count) -> Optional[Iterable[int]]:
if not count:
logger.debug("Count was zero, no range returned")
return
conn = engine.connect()
transaction = conn.begin()
mutex.acquire()
try:
sequence = Sequence('%s_id_seq' % Declarative.__tablename__,
schema=Declarative.metadata.schema)
if isPostGreSQLDialect(engine):
sql = "SELECT setval('%(seq)s', (select nextval('%(seq)s') + %(add)s), true)"
sql %= {
'seq': '"%s"."%s"' % (sequence.schema, sequence.name),
'add': count
}
nextStartId = conn.execute(sql).fetchone()[0]
startId = nextStartId - count
elif isMssqlDialect(engine):
startId = conn.execute(
'SELECT NEXT VALUE FOR "%s"."%s"'
% (sequence.schema, sequence.name)
).fetchone()[0] + 1
nextStartId = startId + count
conn.execute('alter sequence "%s"."%s" restart with %s'
% (sequence.schema, sequence.name, nextStartId))
else:
raise NotImplementedError()
transaction.commit()
return iter(range(startId, nextStartId))
finally:
mutex.release()
conn.close()
|
Common Prefetch Declarative IDs
This function is used by the worker and server
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L221-L267
|
[
"def isMssqlDialect(engine):\n return isinstance(engine.dialect, MSDialect)\n",
"def isPostGreSQLDialect(engine):\n return isinstance(engine.dialect, PGDialect)\n"
] |
import logging
from textwrap import dedent
from threading import Lock
from typing import Optional, Dict, Union, Callable, Iterable
import sqlalchemy_utils
from pytmpdir.Directory import Directory
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.schema import MetaData, Sequence
from peek_plugin_base.storage.AlembicEnvBase import ensureSchemaExists, isMssqlDialect, \
isPostGreSQLDialect
from vortex.DeferUtil import deferToThreadWrapWithLogger
logger = logging.getLogger(__name__)
DbSessionCreator = Callable[[], Session]
DelcarativeIdGen = Optional[Iterable[int]]
DeclarativeIdCreator = Callable[[object, int], DelcarativeIdGen]
class DbConnection:
def __init__(self, dbConnectString: str, metadata: MetaData, alembicDir: str,
dbEngineArgs: Optional[Dict[str, Union[str, int]]] = None,
enableForeignKeys=False, enableCreateAll=True):
""" SQLAlchemy Database Connection
This class takes care of migrating the database and establishing thing database
connections and ORM sessions.
:param dbConnectString:
The connection string for the DB.
See http://docs.sqlalchemy.org/en/latest/core/engines.html
:param metadata:
The instance of the metadata for this connection,
This is schema qualified MetaData(schema="schama_name")
:param alembicDir:
The absolute location of the alembic directory (versions dir lives under this)
:param dbEngineArgs:
The arguments to pass to the database engine, See
http://docs.sqlalchemy.org/en/latest/core/engines.html#engine-creation-api
:param enableCreateAll:
If the schema doesn't exist, then the migration is allowed
to use matadata.create_all()
:param enableForeignKeys:
Perform a check to ensure foriegn keys have indexes after the db is
migrated and connected.
"""
self._dbConnectString = dbConnectString
self._metadata = metadata
self._alembicDir = alembicDir
self._dbEngine = None
self._ScopedSession = None
self._dbEngineArgs = dbEngineArgs if dbEngineArgs else {"echo": False}
self._sequenceMutex = Lock()
self._enableForeignKeys = enableForeignKeys
self._enableCreateAll = enableCreateAll
def closeAllSessions(self):
""" Close All Session
Close all ORM sessions connected to this DB engine.
"""
self.ormSessionCreator() # Ensure we have a session maker and session
self._ScopedSession.close_all()
@property
def ormSessionCreator(self) -> DbSessionCreator:
""" Get Orm Session
:return: A SQLAlchemy session scoped for the callers thread..
"""
assert self._dbConnectString
if self._ScopedSession:
return self._ScopedSession
self._dbEngine = create_engine(
self._dbConnectString,
**self._dbEngineArgs
)
self._ScopedSession = scoped_session(
sessionmaker(bind=self._dbEngine))
return self._ScopedSession
@property
def dbEngine(self) -> Engine:
""" Get DB Engine
This is not thread safe, use the ormSesson to execute SQL statements instead.
self.ormSession.execute(...)
:return: the DB Engine used to connect to the database.
"""
return self._dbEngine
def migrate(self) -> None:
""" Migrate
Perform a database migration, upgrading to the latest schema level.
"""
assert self.ormSessionCreator, "ormSessionCreator is not defined"
connection = self._dbEngine.connect()
isDbInitialised = self._dbEngine.dialect.has_table(
connection, 'alembic_version',
schema=self._metadata.schema)
connection.close()
if isDbInitialised or not self._enableCreateAll:
self._doMigration(self._dbEngine)
else:
self._doCreateAll(self._dbEngine)
if self._enableForeignKeys:
self.checkForeignKeys(self._dbEngine)
def checkForeignKeys(self, engine: Engine) -> None:
""" Check Foreign Keys
Log any foreign keys that don't have indexes assigned to them.
This is a performance issue.
"""
missing = (sqlalchemy_utils.functions
.non_indexed_foreign_keys(self._metadata, engine=engine))
for table, keys in missing.items():
for key in keys:
logger.warning("Missing index on ForeignKey %s" % key.columns)
@deferToThreadWrapWithLogger(logger)
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen:
""" Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
"""
return _commonPrefetchDeclarativeIds(
self.dbEngine, self._sequenceMutex, Declarative, count
)
def _runAlembicCommand(self, command, *args):
configFile = self._writeAlembicIni()
from alembic.config import Config
alembic_cfg = Config(configFile.name)
command(alembic_cfg, *args)
def _doCreateAll(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
self._metadata.create_all(engine)
from alembic import command
self._runAlembicCommand(command.stamp, "head")
def _writeAlembicIni(self):
cfg = '''
[alembic]
script_location = %(alembicDir)s
sourceless = true
sqlalchemy.url = %(url)s
[alembic:exclude]
tables = spatial_ref_sys
[logging]
default_level = INFO
'''
cfg = dedent(cfg)
cfg %= {'alembicDir': self._alembicDir,
'url': self._dbConnectString}
dir = Directory()
file = dir.createTempFile()
with file.open(write=True) as f:
f.write(cfg)
return file.namedTempFileReader()
def _doMigration(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
from alembic import command
self._runAlembicCommand(command.upgrade, "head")
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/DbConnection.py
|
DbConnection.ormSessionCreator
|
python
|
def ormSessionCreator(self) -> DbSessionCreator:
assert self._dbConnectString
if self._ScopedSession:
return self._ScopedSession
self._dbEngine = create_engine(
self._dbConnectString,
**self._dbEngineArgs
)
self._ScopedSession = scoped_session(
sessionmaker(bind=self._dbEngine))
return self._ScopedSession
|
Get Orm Session
:return: A SQLAlchemy session scoped for the callers thread..
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L82-L100
| null |
class DbConnection:
def __init__(self, dbConnectString: str, metadata: MetaData, alembicDir: str,
dbEngineArgs: Optional[Dict[str, Union[str, int]]] = None,
enableForeignKeys=False, enableCreateAll=True):
""" SQLAlchemy Database Connection
This class takes care of migrating the database and establishing thing database
connections and ORM sessions.
:param dbConnectString:
The connection string for the DB.
See http://docs.sqlalchemy.org/en/latest/core/engines.html
:param metadata:
The instance of the metadata for this connection,
This is schema qualified MetaData(schema="schama_name")
:param alembicDir:
The absolute location of the alembic directory (versions dir lives under this)
:param dbEngineArgs:
The arguments to pass to the database engine, See
http://docs.sqlalchemy.org/en/latest/core/engines.html#engine-creation-api
:param enableCreateAll:
If the schema doesn't exist, then the migration is allowed
to use matadata.create_all()
:param enableForeignKeys:
Perform a check to ensure foriegn keys have indexes after the db is
migrated and connected.
"""
self._dbConnectString = dbConnectString
self._metadata = metadata
self._alembicDir = alembicDir
self._dbEngine = None
self._ScopedSession = None
self._dbEngineArgs = dbEngineArgs if dbEngineArgs else {"echo": False}
self._sequenceMutex = Lock()
self._enableForeignKeys = enableForeignKeys
self._enableCreateAll = enableCreateAll
def closeAllSessions(self):
""" Close All Session
Close all ORM sessions connected to this DB engine.
"""
self.ormSessionCreator() # Ensure we have a session maker and session
self._ScopedSession.close_all()
@property
@property
def dbEngine(self) -> Engine:
""" Get DB Engine
This is not thread safe, use the ormSesson to execute SQL statements instead.
self.ormSession.execute(...)
:return: the DB Engine used to connect to the database.
"""
return self._dbEngine
def migrate(self) -> None:
""" Migrate
Perform a database migration, upgrading to the latest schema level.
"""
assert self.ormSessionCreator, "ormSessionCreator is not defined"
connection = self._dbEngine.connect()
isDbInitialised = self._dbEngine.dialect.has_table(
connection, 'alembic_version',
schema=self._metadata.schema)
connection.close()
if isDbInitialised or not self._enableCreateAll:
self._doMigration(self._dbEngine)
else:
self._doCreateAll(self._dbEngine)
if self._enableForeignKeys:
self.checkForeignKeys(self._dbEngine)
def checkForeignKeys(self, engine: Engine) -> None:
""" Check Foreign Keys
Log any foreign keys that don't have indexes assigned to them.
This is a performance issue.
"""
missing = (sqlalchemy_utils.functions
.non_indexed_foreign_keys(self._metadata, engine=engine))
for table, keys in missing.items():
for key in keys:
logger.warning("Missing index on ForeignKey %s" % key.columns)
@deferToThreadWrapWithLogger(logger)
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen:
""" Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
"""
return _commonPrefetchDeclarativeIds(
self.dbEngine, self._sequenceMutex, Declarative, count
)
def _runAlembicCommand(self, command, *args):
configFile = self._writeAlembicIni()
from alembic.config import Config
alembic_cfg = Config(configFile.name)
command(alembic_cfg, *args)
def _doCreateAll(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
self._metadata.create_all(engine)
from alembic import command
self._runAlembicCommand(command.stamp, "head")
def _writeAlembicIni(self):
cfg = '''
[alembic]
script_location = %(alembicDir)s
sourceless = true
sqlalchemy.url = %(url)s
[alembic:exclude]
tables = spatial_ref_sys
[logging]
default_level = INFO
'''
cfg = dedent(cfg)
cfg %= {'alembicDir': self._alembicDir,
'url': self._dbConnectString}
dir = Directory()
file = dir.createTempFile()
with file.open(write=True) as f:
f.write(cfg)
return file.namedTempFileReader()
def _doMigration(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
from alembic import command
self._runAlembicCommand(command.upgrade, "head")
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/DbConnection.py
|
DbConnection.migrate
|
python
|
def migrate(self) -> None:
assert self.ormSessionCreator, "ormSessionCreator is not defined"
connection = self._dbEngine.connect()
isDbInitialised = self._dbEngine.dialect.has_table(
connection, 'alembic_version',
schema=self._metadata.schema)
connection.close()
if isDbInitialised or not self._enableCreateAll:
self._doMigration(self._dbEngine)
else:
self._doCreateAll(self._dbEngine)
if self._enableForeignKeys:
self.checkForeignKeys(self._dbEngine)
|
Migrate
Perform a database migration, upgrading to the latest schema level.
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L114-L135
|
[
"def checkForeignKeys(self, engine: Engine) -> None:\n \"\"\" Check Foreign Keys\n\n Log any foreign keys that don't have indexes assigned to them.\n This is a performance issue.\n\n \"\"\"\n missing = (sqlalchemy_utils.functions\n .non_indexed_foreign_keys(self._metadata, engine=engine))\n\n for table, keys in missing.items():\n for key in keys:\n logger.warning(\"Missing index on ForeignKey %s\" % key.columns)\n",
"def _doCreateAll(self, engine):\n ensureSchemaExists(engine, self._metadata.schema)\n self._metadata.create_all(engine)\n\n from alembic import command\n self._runAlembicCommand(command.stamp, \"head\")\n",
"def _doMigration(self, engine):\n ensureSchemaExists(engine, self._metadata.schema)\n\n from alembic import command\n self._runAlembicCommand(command.upgrade, \"head\")\n"
] |
class DbConnection:
def __init__(self, dbConnectString: str, metadata: MetaData, alembicDir: str,
dbEngineArgs: Optional[Dict[str, Union[str, int]]] = None,
enableForeignKeys=False, enableCreateAll=True):
""" SQLAlchemy Database Connection
This class takes care of migrating the database and establishing thing database
connections and ORM sessions.
:param dbConnectString:
The connection string for the DB.
See http://docs.sqlalchemy.org/en/latest/core/engines.html
:param metadata:
The instance of the metadata for this connection,
This is schema qualified MetaData(schema="schama_name")
:param alembicDir:
The absolute location of the alembic directory (versions dir lives under this)
:param dbEngineArgs:
The arguments to pass to the database engine, See
http://docs.sqlalchemy.org/en/latest/core/engines.html#engine-creation-api
:param enableCreateAll:
If the schema doesn't exist, then the migration is allowed
to use matadata.create_all()
:param enableForeignKeys:
Perform a check to ensure foriegn keys have indexes after the db is
migrated and connected.
"""
self._dbConnectString = dbConnectString
self._metadata = metadata
self._alembicDir = alembicDir
self._dbEngine = None
self._ScopedSession = None
self._dbEngineArgs = dbEngineArgs if dbEngineArgs else {"echo": False}
self._sequenceMutex = Lock()
self._enableForeignKeys = enableForeignKeys
self._enableCreateAll = enableCreateAll
def closeAllSessions(self):
""" Close All Session
Close all ORM sessions connected to this DB engine.
"""
self.ormSessionCreator() # Ensure we have a session maker and session
self._ScopedSession.close_all()
@property
def ormSessionCreator(self) -> DbSessionCreator:
""" Get Orm Session
:return: A SQLAlchemy session scoped for the callers thread..
"""
assert self._dbConnectString
if self._ScopedSession:
return self._ScopedSession
self._dbEngine = create_engine(
self._dbConnectString,
**self._dbEngineArgs
)
self._ScopedSession = scoped_session(
sessionmaker(bind=self._dbEngine))
return self._ScopedSession
@property
def dbEngine(self) -> Engine:
""" Get DB Engine
This is not thread safe, use the ormSesson to execute SQL statements instead.
self.ormSession.execute(...)
:return: the DB Engine used to connect to the database.
"""
return self._dbEngine
def checkForeignKeys(self, engine: Engine) -> None:
""" Check Foreign Keys
Log any foreign keys that don't have indexes assigned to them.
This is a performance issue.
"""
missing = (sqlalchemy_utils.functions
.non_indexed_foreign_keys(self._metadata, engine=engine))
for table, keys in missing.items():
for key in keys:
logger.warning("Missing index on ForeignKey %s" % key.columns)
@deferToThreadWrapWithLogger(logger)
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen:
""" Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
"""
return _commonPrefetchDeclarativeIds(
self.dbEngine, self._sequenceMutex, Declarative, count
)
def _runAlembicCommand(self, command, *args):
configFile = self._writeAlembicIni()
from alembic.config import Config
alembic_cfg = Config(configFile.name)
command(alembic_cfg, *args)
def _doCreateAll(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
self._metadata.create_all(engine)
from alembic import command
self._runAlembicCommand(command.stamp, "head")
def _writeAlembicIni(self):
cfg = '''
[alembic]
script_location = %(alembicDir)s
sourceless = true
sqlalchemy.url = %(url)s
[alembic:exclude]
tables = spatial_ref_sys
[logging]
default_level = INFO
'''
cfg = dedent(cfg)
cfg %= {'alembicDir': self._alembicDir,
'url': self._dbConnectString}
dir = Directory()
file = dir.createTempFile()
with file.open(write=True) as f:
f.write(cfg)
return file.namedTempFileReader()
def _doMigration(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
from alembic import command
self._runAlembicCommand(command.upgrade, "head")
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/DbConnection.py
|
DbConnection.checkForeignKeys
|
python
|
def checkForeignKeys(self, engine: Engine) -> None:
missing = (sqlalchemy_utils.functions
.non_indexed_foreign_keys(self._metadata, engine=engine))
for table, keys in missing.items():
for key in keys:
logger.warning("Missing index on ForeignKey %s" % key.columns)
|
Check Foreign Keys
Log any foreign keys that don't have indexes assigned to them.
This is a performance issue.
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L137-L149
| null |
class DbConnection:
def __init__(self, dbConnectString: str, metadata: MetaData, alembicDir: str,
dbEngineArgs: Optional[Dict[str, Union[str, int]]] = None,
enableForeignKeys=False, enableCreateAll=True):
""" SQLAlchemy Database Connection
This class takes care of migrating the database and establishing thing database
connections and ORM sessions.
:param dbConnectString:
The connection string for the DB.
See http://docs.sqlalchemy.org/en/latest/core/engines.html
:param metadata:
The instance of the metadata for this connection,
This is schema qualified MetaData(schema="schama_name")
:param alembicDir:
The absolute location of the alembic directory (versions dir lives under this)
:param dbEngineArgs:
The arguments to pass to the database engine, See
http://docs.sqlalchemy.org/en/latest/core/engines.html#engine-creation-api
:param enableCreateAll:
If the schema doesn't exist, then the migration is allowed
to use matadata.create_all()
:param enableForeignKeys:
Perform a check to ensure foriegn keys have indexes after the db is
migrated and connected.
"""
self._dbConnectString = dbConnectString
self._metadata = metadata
self._alembicDir = alembicDir
self._dbEngine = None
self._ScopedSession = None
self._dbEngineArgs = dbEngineArgs if dbEngineArgs else {"echo": False}
self._sequenceMutex = Lock()
self._enableForeignKeys = enableForeignKeys
self._enableCreateAll = enableCreateAll
def closeAllSessions(self):
""" Close All Session
Close all ORM sessions connected to this DB engine.
"""
self.ormSessionCreator() # Ensure we have a session maker and session
self._ScopedSession.close_all()
@property
def ormSessionCreator(self) -> DbSessionCreator:
""" Get Orm Session
:return: A SQLAlchemy session scoped for the callers thread..
"""
assert self._dbConnectString
if self._ScopedSession:
return self._ScopedSession
self._dbEngine = create_engine(
self._dbConnectString,
**self._dbEngineArgs
)
self._ScopedSession = scoped_session(
sessionmaker(bind=self._dbEngine))
return self._ScopedSession
@property
def dbEngine(self) -> Engine:
""" Get DB Engine
This is not thread safe, use the ormSesson to execute SQL statements instead.
self.ormSession.execute(...)
:return: the DB Engine used to connect to the database.
"""
return self._dbEngine
def migrate(self) -> None:
""" Migrate
Perform a database migration, upgrading to the latest schema level.
"""
assert self.ormSessionCreator, "ormSessionCreator is not defined"
connection = self._dbEngine.connect()
isDbInitialised = self._dbEngine.dialect.has_table(
connection, 'alembic_version',
schema=self._metadata.schema)
connection.close()
if isDbInitialised or not self._enableCreateAll:
self._doMigration(self._dbEngine)
else:
self._doCreateAll(self._dbEngine)
if self._enableForeignKeys:
self.checkForeignKeys(self._dbEngine)
@deferToThreadWrapWithLogger(logger)
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen:
""" Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
"""
return _commonPrefetchDeclarativeIds(
self.dbEngine, self._sequenceMutex, Declarative, count
)
def _runAlembicCommand(self, command, *args):
configFile = self._writeAlembicIni()
from alembic.config import Config
alembic_cfg = Config(configFile.name)
command(alembic_cfg, *args)
def _doCreateAll(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
self._metadata.create_all(engine)
from alembic import command
self._runAlembicCommand(command.stamp, "head")
def _writeAlembicIni(self):
cfg = '''
[alembic]
script_location = %(alembicDir)s
sourceless = true
sqlalchemy.url = %(url)s
[alembic:exclude]
tables = spatial_ref_sys
[logging]
default_level = INFO
'''
cfg = dedent(cfg)
cfg %= {'alembicDir': self._alembicDir,
'url': self._dbConnectString}
dir = Directory()
file = dir.createTempFile()
with file.open(write=True) as f:
f.write(cfg)
return file.namedTempFileReader()
def _doMigration(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
from alembic import command
self._runAlembicCommand(command.upgrade, "head")
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/DbConnection.py
|
DbConnection.prefetchDeclarativeIds
|
python
|
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen:
return _commonPrefetchDeclarativeIds(
self.dbEngine, self._sequenceMutex, Declarative, count
)
|
Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L152-L171
|
[
"def _commonPrefetchDeclarativeIds(engine, mutex,\n Declarative, count) -> Optional[Iterable[int]]:\n \"\"\" Common Prefetch Declarative IDs\n\n This function is used by the worker and server\n \"\"\"\n if not count:\n logger.debug(\"Count was zero, no range returned\")\n return\n\n conn = engine.connect()\n transaction = conn.begin()\n mutex.acquire()\n try:\n sequence = Sequence('%s_id_seq' % Declarative.__tablename__,\n schema=Declarative.metadata.schema)\n\n if isPostGreSQLDialect(engine):\n sql = \"SELECT setval('%(seq)s', (select nextval('%(seq)s') + %(add)s), true)\"\n sql %= {\n 'seq': '\"%s\".\"%s\"' % (sequence.schema, sequence.name),\n 'add': count\n }\n nextStartId = conn.execute(sql).fetchone()[0]\n startId = nextStartId - count\n\n elif isMssqlDialect(engine):\n startId = conn.execute(\n 'SELECT NEXT VALUE FOR \"%s\".\"%s\"'\n % (sequence.schema, sequence.name)\n ).fetchone()[0] + 1\n\n nextStartId = startId + count\n\n conn.execute('alter sequence \"%s\".\"%s\" restart with %s'\n % (sequence.schema, sequence.name, nextStartId))\n\n else:\n raise NotImplementedError()\n\n transaction.commit()\n\n return iter(range(startId, nextStartId))\n\n finally:\n mutex.release()\n conn.close()\n"
] |
class DbConnection:
def __init__(self, dbConnectString: str, metadata: MetaData, alembicDir: str,
dbEngineArgs: Optional[Dict[str, Union[str, int]]] = None,
enableForeignKeys=False, enableCreateAll=True):
""" SQLAlchemy Database Connection
This class takes care of migrating the database and establishing thing database
connections and ORM sessions.
:param dbConnectString:
The connection string for the DB.
See http://docs.sqlalchemy.org/en/latest/core/engines.html
:param metadata:
The instance of the metadata for this connection,
This is schema qualified MetaData(schema="schama_name")
:param alembicDir:
The absolute location of the alembic directory (versions dir lives under this)
:param dbEngineArgs:
The arguments to pass to the database engine, See
http://docs.sqlalchemy.org/en/latest/core/engines.html#engine-creation-api
:param enableCreateAll:
If the schema doesn't exist, then the migration is allowed
to use matadata.create_all()
:param enableForeignKeys:
Perform a check to ensure foriegn keys have indexes after the db is
migrated and connected.
"""
self._dbConnectString = dbConnectString
self._metadata = metadata
self._alembicDir = alembicDir
self._dbEngine = None
self._ScopedSession = None
self._dbEngineArgs = dbEngineArgs if dbEngineArgs else {"echo": False}
self._sequenceMutex = Lock()
self._enableForeignKeys = enableForeignKeys
self._enableCreateAll = enableCreateAll
def closeAllSessions(self):
""" Close All Session
Close all ORM sessions connected to this DB engine.
"""
self.ormSessionCreator() # Ensure we have a session maker and session
self._ScopedSession.close_all()
@property
def ormSessionCreator(self) -> DbSessionCreator:
""" Get Orm Session
:return: A SQLAlchemy session scoped for the callers thread..
"""
assert self._dbConnectString
if self._ScopedSession:
return self._ScopedSession
self._dbEngine = create_engine(
self._dbConnectString,
**self._dbEngineArgs
)
self._ScopedSession = scoped_session(
sessionmaker(bind=self._dbEngine))
return self._ScopedSession
@property
def dbEngine(self) -> Engine:
""" Get DB Engine
This is not thread safe, use the ormSesson to execute SQL statements instead.
self.ormSession.execute(...)
:return: the DB Engine used to connect to the database.
"""
return self._dbEngine
def migrate(self) -> None:
""" Migrate
Perform a database migration, upgrading to the latest schema level.
"""
assert self.ormSessionCreator, "ormSessionCreator is not defined"
connection = self._dbEngine.connect()
isDbInitialised = self._dbEngine.dialect.has_table(
connection, 'alembic_version',
schema=self._metadata.schema)
connection.close()
if isDbInitialised or not self._enableCreateAll:
self._doMigration(self._dbEngine)
else:
self._doCreateAll(self._dbEngine)
if self._enableForeignKeys:
self.checkForeignKeys(self._dbEngine)
def checkForeignKeys(self, engine: Engine) -> None:
""" Check Foreign Keys
Log any foreign keys that don't have indexes assigned to them.
This is a performance issue.
"""
missing = (sqlalchemy_utils.functions
.non_indexed_foreign_keys(self._metadata, engine=engine))
for table, keys in missing.items():
for key in keys:
logger.warning("Missing index on ForeignKey %s" % key.columns)
@deferToThreadWrapWithLogger(logger)
def _runAlembicCommand(self, command, *args):
configFile = self._writeAlembicIni()
from alembic.config import Config
alembic_cfg = Config(configFile.name)
command(alembic_cfg, *args)
def _doCreateAll(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
self._metadata.create_all(engine)
from alembic import command
self._runAlembicCommand(command.stamp, "head")
def _writeAlembicIni(self):
cfg = '''
[alembic]
script_location = %(alembicDir)s
sourceless = true
sqlalchemy.url = %(url)s
[alembic:exclude]
tables = spatial_ref_sys
[logging]
default_level = INFO
'''
cfg = dedent(cfg)
cfg %= {'alembicDir': self._alembicDir,
'url': self._dbConnectString}
dir = Directory()
file = dir.createTempFile()
with file.open(write=True) as f:
f.write(cfg)
return file.namedTempFileReader()
def _doMigration(self, engine):
ensureSchemaExists(engine, self._metadata.schema)
from alembic import command
self._runAlembicCommand(command.upgrade, "head")
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/StorageUtil.py
|
makeOrmValuesSubqueryCondition
|
python
|
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
if isPostGreSQLDialect(ormSession.bind):
return column.in_(values)
if not isMssqlDialect(ormSession.bind):
raise NotImplementedError()
sql = _createMssqlSqlText(values)
sub_qry = ormSession.query(column) # Any column, it just assigns a name
sub_qry = sub_qry.from_statement(sql)
return column.in_(sub_qry)
|
Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/StorageUtil.py#L25-L43
|
[
"def isMssqlDialect(engine):\n return isinstance(engine.dialect, MSDialect)\n",
"def isPostGreSQLDialect(engine):\n return isinstance(engine.dialect, PGDialect)\n"
] |
from typing import List, Union
from sqlalchemy import text
from peek_plugin_base.storage.AlembicEnvBase import isMssqlDialect, isPostGreSQLDialect
def _createMssqlSqlText(values: List[Union[int, str]]) -> str:
if not values:
name = "peekCsvVarcharToTable" # Either will do
elif isinstance(values[0], str):
name = "peekCsvVarcharToTable"
elif isinstance(values[0], int):
name = "peekCsvIntToTable"
values = [str(v) for v in values]
else:
raise NotImplementedError("The value supplies isn't a str or int, %s", values[0])
return text("SELECT * FROM [dbo].[%s]('%s')" % (name, ','.join(values)))
def makeCoreValuesSubqueryCondition(engine, column, values: List[Union[int, str]]):
    """Build an ``IN`` condition for *column* against *values* (Core flavour).

    :param engine: the database engine, used to determine the dialect
    :param column: the column, e.g. TableItem.__table__.c.colName
    :param values: a list of string or int values
    """
    if isPostGreSQLDialect(engine):
        # PostgreSQL copes with the literal value list directly.
        return column.in_(values)
    if isMssqlDialect(engine):
        # MSSQL: explode the values server-side via the CSV table function.
        return column.in_(_createMssqlSqlText(values))
    raise NotImplementedError()
|
Synerty/peek-plugin-base
|
peek_plugin_base/storage/StorageUtil.py
|
makeCoreValuesSubqueryCondition
|
python
|
def makeCoreValuesSubqueryCondition(engine, column, values: List[Union[int, str]]):
    """Build an ``IN`` condition for *column* against *values* (Core flavour).

    :param engine: the database engine, used to determine the dialect
    :param column: the column, e.g. TableItem.__table__.c.colName
    :param values: a list of string or int values
    """
    if isPostGreSQLDialect(engine):
        # PostgreSQL copes with the literal value list directly.
        return column.in_(values)
    if isMssqlDialect(engine):
        # MSSQL: explode the values server-side via the CSV table function.
        return column.in_(_createMssqlSqlText(values))
    raise NotImplementedError()
|
Make Core Values Subquery
:param engine: The database engine, used to determine the dialect
:param column: The column, eg TableItem.__table__.c.colName
:param values: A list of string or int values
|
train
|
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/StorageUtil.py#L46-L62
|
[
"def isMssqlDialect(engine):\n return isinstance(engine.dialect, MSDialect)\n",
"def isPostGreSQLDialect(engine):\n return isinstance(engine.dialect, PGDialect)\n"
] |
from typing import List, Union
from sqlalchemy import text
from peek_plugin_base.storage.AlembicEnvBase import isMssqlDialect, isPostGreSQLDialect
def _createMssqlSqlText(values: List[Union[int, str]]) -> str:
if not values:
name = "peekCsvVarcharToTable" # Either will do
elif isinstance(values[0], str):
name = "peekCsvVarcharToTable"
elif isinstance(values[0], int):
name = "peekCsvIntToTable"
values = [str(v) for v in values]
else:
raise NotImplementedError("The value supplies isn't a str or int, %s", values[0])
return text("SELECT * FROM [dbo].[%s]('%s')" % (name, ','.join(values)))
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
    """Build an ``IN`` condition for *column* against *values* (ORM flavour).

    PostgreSQL accepts the literal value list directly; MSSQL instead routes
    the values through a server-side CSV-splitting table function and uses a
    subquery over its rows.

    :param ormSession: the ORM session instance
    :param column: column from the Declarative table, e.g. TableItem.colName
    :param values: a list of string or int values
    """
    bind = ormSession.bind
    if isPostGreSQLDialect(bind):
        return column.in_(values)
    if isMssqlDialect(bind):
        # The selected column is arbitrary; it only names the subquery result.
        subQry = (ormSession.query(column)
                  .from_statement(_createMssqlSqlText(values)))
        return column.in_(subQry)
    raise NotImplementedError()
|
scivision/gridaurora
|
gridaurora/__init__.py
|
to_ut1unix
|
python
|
def to_ut1unix(time: Union[str, datetime, float, np.ndarray]) -> np.ndarray:
    """converts time inputs to UT1 seconds since Unix epoch"""
    # Normalise first (str -> datetime, arrays squeezed); order matters,
    # since the scalar shortcut below relies on totime()'s squeezing.
    time = totime(time)

    if isinstance(time, (float, int)):
        # already numeric seconds - pass straight through
        return time

    if isinstance(time, (tuple, list, np.ndarray)):
        assert isinstance(time[0], datetime), f'expected datetime, not {type(time[0])}'
        return np.array([dt2ut1(t) for t in time])

    assert isinstance(time, datetime)
    return dt2ut1(time)
|
converts time inputs to UT1 seconds since Unix epoch
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/__init__.py#L28-L43
|
[
"def totime(time: Union[str, datetime, np.datetime64]) -> np.ndarray:\n time = np.atleast_1d(time)\n\n if isinstance(time[0], (datetime, np.datetime64)):\n pass\n elif isinstance(time[0], str):\n time = np.atleast_1d(list(map(parse, time)))\n\n return time.squeeze()[()]\n"
] |
from datetime import datetime, date
from dateutil.parser import parse
import numpy as np
import logging
from typing import Union
def toyearmon(time: datetime) -> int:
    """Collapse a time specifier to an integer YYYYMM month stamp.

    Accepts datetime/date/np.datetime64/parsable str; sequences are reduced
    to their first element (with a warning).
    """
    # %% date handle
    if isinstance(time, (tuple, list, np.ndarray)):
        logging.warning(f'taking only first time {time[0]}, would you like multiple times upgrade to code?')
        time = time[0]

    if isinstance(time, np.datetime64):
        time = time.astype(datetime)
    elif isinstance(time, str):
        time = parse(time)
    elif not isinstance(time, (datetime, date)):
        raise TypeError(f'not sure what to do with type {type(time)}')

    # Zero-pad the month so e.g. 2021-03 becomes 202103, not 20213.
    return int(f'{time.year:d}{time.month:02d}')
def dt2ut1(t: datetime) -> float:
    """Seconds elapsed between the Unix epoch and naive datetime *t*."""
    assert isinstance(t, datetime)
    return (t - datetime(1970, 1, 1)).total_seconds()
def totime(time: Union[str, datetime, np.datetime64]) -> np.ndarray:
    """Normalise a time specifier: parse strings, pass datetimes through.

    Returns a scalar for single inputs and an array for multiple inputs
    (the trailing ``[()]`` unwraps a 0-d array to its scalar element).
    """
    arr = np.atleast_1d(time)
    if isinstance(arr[0], str):
        arr = np.atleast_1d([parse(s) for s in arr])
    # datetime / np.datetime64 elements are already usable as-is
    return arr.squeeze()[()]
def chapman_profile(Z0: float, zKM: np.ndarray, H: float):
    """Chapman ionization profile, normalised to 1 at the peak.

    Z0: altitude [km] of intensity peak
    zKM: altitude grid [km]
    H: scale height [km]

    example:
    pz = chapman_profile(110, np.arange(90, 200, 1), 20)
    """
    z = (zKM - Z0) / H  # reduced height
    return np.exp(0.5 * (1 - z - np.exp(-z)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.