text stringlengths 81 112k |
|---|
Query and parse Nginx Web Server Status Page.
def initStats(self):
    """Query and parse Nginx Web Server Status Page.

    Populates self._statusDict with the accepts/handled/requests
    counters and any 'key: value' pairs found on the status page.
    """
    url = "%s://%s:%d/%s" % (self._proto, self._host, self._port,
                             self._statuspath)
    response = util.get_url(url, self._user, self._password)
    self._statusDict = {}
    for line in response.splitlines():
        # A line of exactly three integers holds the accepts / handled /
        # requests totals.  Raw strings keep regex escapes intact.
        mobj = re.match(r'\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line)
        if mobj:
            for idx, key in enumerate(('accepts', 'handled', 'requests'), 1):
                self._statusDict[key] = util.parse_value(mobj.group(idx))
        else:
            # Other lines carry "name: number" pairs (possibly several).
            for (key, val) in re.findall(r'(\w+):\s*(\d+)', line):
                self._statusDict[key.lower()] = util.parse_value(val)
Read a file into a string
def read_file(filename):
    """Read a file into a string.

    The path is resolved relative to the directory containing this
    module.  Returns an empty string if the file cannot be read.

    @param filename: File name relative to this module's directory.
    @return: File contents, or '' on IOError.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(path, filename)
    try:
        # Context manager guarantees the handle is closed (the original
        # leaked the file object).
        with open(filepath) as fp:
            return fp.read()
    except IOError:
        return ''
Retrieve values for graphs.
def retrieveVals(self):
    """Retrieve values for process / thread status and priority graphs."""
    proc_info = ProcessInfo()
    stats = {}
    for (prefix, is_thread) in (('proc', False),
                                ('thread', True)):
        graph_name = '%s_status' % prefix
        if self.hasGraph(graph_name):
            # Fetch stats lazily, only once per prefix.  'in' replaces
            # dict.has_key(), which no longer exists on Python 3.
            if prefix not in stats:
                stats[prefix] = proc_info.getProcStatStatus(is_thread)
            for (fname, stat_key) in (
                ('unint_sleep', 'uninterruptable_sleep'),
                ('stopped', 'stopped'),
                ('defunct', 'defunct'),
                ('running', 'running'),
                ('sleep', 'sleep')):
                self.setGraphVal(graph_name, fname,
                                 stats[prefix]['status'].get(stat_key))
        graph_name = '%s_prio' % prefix
        if self.hasGraph(graph_name):
            if prefix not in stats:
                stats[prefix] = proc_info.getProcStatStatus(is_thread)
            for (fname, stat_key) in (
                ('high', 'high'),
                ('low', 'low'),
                ('norm', 'norm'),
                ('locked', 'locked_in_mem')):
                self.setGraphVal(graph_name, fname,
                                 stats[prefix]['prio'].get(stat_key))
Retrieve values for graphs.
def retrieveVals(self):
    """Retrieve values for graphs."""
    container_stats = self._fileInfo.getContainerStats()
    size_graph = 'rackspace_cloudfiles_container_size'
    count_graph = 'rackspace_cloudfiles_container_count'
    for name in self._fileContList:
        entry = container_stats.get(name)
        if entry is None:
            # No stats reported for this container; skip it.
            continue
        if self.hasGraph(size_graph):
            self.setGraphVal(size_graph, name, entry.get('size'))
        if self.hasGraph(count_graph):
            self.setGraphVal(count_graph, name, entry.get('count'))
General plot function that groups data by subject/list number and performs analysis.
Parameters
----------
results : quail.FriedEgg
Object containing results
subjgroup : list of strings or ints
String/int variables indicating how to group over subjects. Must be
the length of the number of subjects
subjname : string
Name of the subject grouping variable
listgroup : list of strings or ints
String/int variables indicating how to group over list. Must be
the length of the number of lists
listname : string
Name of the list grouping variable
subjconds : list
List of subject hues (str) to plot
listconds : list
List of list hues (str) to plot
plot_type : string
Specifies the type of plot. If list (default), the list groupings (listgroup)
will determine the plot grouping. If subject, the subject groupings
(subjgroup) will determine the plot grouping. If split (currently just
works for accuracy plots), both listgroup and subjgroup will determine
the plot groupings
plot_style : string
Specifies the style of the plot. This currently works only for
accuracy and fingerprint plots. The plot style can be bar (default for
accuracy plot), violin (default for fingerprint plots) or swarm.
title : string
The title of the plot
legend : bool
If true (default), a legend is plotted.
ylim : list of numbers
A ymin/max can be specified by a list of the form [ymin, ymax]
xlim : list of numbers
A xmin/max can be specified by a list of the form [xmin, xmax]
save_path : str
Path to save out figure. Include the file extension, e.g.
save_path='figure.pdf'
show : bool
If False, do not show figure, but still return ax handle (default True).
ax : Matplotlib.Axes object or None
A plot object to draw to. If None, a new one is created and returned.
Returns
----------
ax : matplotlib.Axes.Axis
An axis handle for the figure
def plot(results, subjgroup=None, subjname='Subject Group', listgroup=None,
         listname='List', subjconds=None, listconds=None, plot_type=None,
         plot_style=None, title=None, legend=True, xlim=None, ylim=None,
         save_path=None, show=True, ax=None, **kwargs):
    """
    General plot function that groups data by subject/list number and performs analysis.

    Parameters
    ----------
    results : quail.FriedEgg
        Object containing results
    subjgroup : list of strings or ints
        String/int variables indicating how to group over subjects. Must be
        the length of the number of subjects
    subjname : string
        Name of the subject grouping variable
    listgroup : list of strings or ints
        String/int variables indicating how to group over list. Must be
        the length of the number of lists
    listname : string
        Name of the list grouping variable
    subjconds : list
        List of subject hues (str) to plot
    listconds : list
        List of list hues (str) to plot
    plot_type : string
        Specifies the type of plot. If list (default), the list groupings
        (listgroup) will determine the plot grouping. If subject, the subject
        groupings (subjgroup) will determine the plot grouping. If split
        (currently just works for accuracy plots), both listgroup and
        subjgroup will determine the plot groupings
    plot_style : string
        Specifies the style of the plot. This currently works only for
        accuracy and fingerprint plots. The plot style can be bar (default
        for accuracy plot), violin (default for fingerprint plots) or swarm.
    title : string
        The title of the plot
    legend : bool
        If true (default), a legend is plotted.
    ylim : list of numbers
        A ymin/max can be specified by a list of the form [ymin, ymax]
    xlim : list of numbers
        A xmin/max can be specified by a list of the form [xmin, xmax]
    save_path : str
        Path to save out figure. Include the file extension, e.g.
        save_path='figure.pdf'
    show : bool
        If False, do not show figure, but still return ax handle (default True).
    ax : Matplotlib.Axes object or None
        A plot object to draw to. If None, a new one is created and returned.

    Returns
    ----------
    ax : matplotlib.Axes.Axis
        An axis handle for the figure
    """
    def _style_func(plot_style, default):
        # Map a style name to the seaborn plot function.  '==' replaces the
        # original 'is' comparisons, which relied on CPython string interning
        # and could silently mismatch for dynamically built strings.
        style = plot_style if plot_style is not None else default
        if style == 'bar':
            return sns.barplot
        elif style == 'swarm':
            return sns.swarmplot
        elif style == 'violin':
            return sns.violinplot
        raise ValueError("plot_style must be 'bar', 'swarm' or 'violin'.")

    def plot_acc(data, plot_style, plot_type, listname, subjname, **kwargs):
        # Accuracy plot; defaults to a bar plot grouped by list.
        plot_func = _style_func(plot_style, 'bar')
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'list':
            ax = plot_func(data=data, x=listname, y="Accuracy", **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x=subjname, y="Accuracy", **kwargs)
        elif plot_type == 'split':
            ax = plot_func(data=data, x=subjname, y="Accuracy", hue=listname, **kwargs)
        return ax

    def plot_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
        # Temporal clustering plot; defaults to a bar plot grouped by list.
        plot_func = _style_func(plot_style, 'bar')
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'list':
            ax = plot_func(data=data, x=listname, y="Temporal Clustering Score", **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", **kwargs)
        elif plot_type == 'split':
            ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", hue=listname, **kwargs)
        return ax

    def plot_fingerprint(data, plot_style, plot_type, listname, subjname, **kwargs):
        # Fingerprint plot; defaults to a violin plot.
        # Bug fix: use the 'data' parameter (the original read the closure
        # variable tidy_data, silently ignoring the argument).
        plot_func = _style_func(plot_style, 'violin')
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'list':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, **kwargs)
        else:
            ax = plot_func(data=data, x="Feature", y="Clustering Score", **kwargs)
        return ax

    def plot_fingerprint_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
        # Combined fingerprint + temporal plot; pins the feature order so the
        # x-axis is stable across groupings.
        plot_func = _style_func(plot_style, 'violin')
        plot_type = plot_type if plot_type is not None else 'list'
        order = list(data['Feature'].unique())
        if plot_type == 'list':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, order=order, **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, order=order, **kwargs)
        else:
            ax = plot_func(data=data, x="Feature", y="Clustering Score", order=order, **kwargs)
        return ax

    def plot_spc(data, plot_style, plot_type, listname, subjname, **kwargs):
        # Serial position curve.
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'subject':
            ax = sns.lineplot(data=data, x="Position", y="Proportion Recalled", hue=subjname, **kwargs)
        elif plot_type == 'list':
            ax = sns.lineplot(data=data, x="Position", y="Proportion Recalled", hue=listname, **kwargs)
        ax.set_xlim(0, data['Position'].max())
        return ax

    def plot_pnr(data, plot_style, plot_type, listname, subjname, position, list_length, **kwargs):
        # Probability of nth recall.
        plot_type = plot_type if plot_type is not None else 'list'
        ycol = 'Probability of Recall: Position ' + str(position)
        if plot_type == 'subject':
            ax = sns.lineplot(data=data, x="Position", y=ycol, hue=subjname, **kwargs)
        elif plot_type == 'list':
            ax = sns.lineplot(data=data, x="Position", y=ycol, hue=listname, **kwargs)
        ax.set_xlim(0, list_length - 1)
        return ax

    def plot_lagcrp(data, plot_style, plot_type, listname, subjname, **kwargs):
        # Lag-CRP: negative and positive lags drawn as two segments on the
        # same axes (the second call suppresses a duplicate legend).
        plot_type = plot_type if plot_type is not None else 'list'
        hue = subjname if plot_type == 'subject' else listname
        if plot_type in ('subject', 'list'):
            ax = sns.lineplot(data=data[data['Position'] < 0], x="Position",
                              y="Conditional Response Probability", hue=hue, **kwargs)
            if 'ax' in kwargs:
                del kwargs['ax']
            sns.lineplot(data=data[data['Position'] > 0], x="Position",
                         y="Conditional Response Probability", hue=hue,
                         ax=ax, legend=False, **kwargs)
        ax.set_xlim(-5, 5)
        return ax

    # If no grouping is given, iterate over each subject/list independently.
    subjgroup = subjgroup if subjgroup is not None else results.data.index.levels[0].values
    listgroup = listgroup if listgroup is not None else results.data.index.levels[1].values

    if subjconds:
        # make sure its a list
        if type(subjconds) is not list:
            subjconds = [subjconds]
        # slice the data down to the requested subject conditions
        idx = pd.IndexSlice
        results.data = results.data.sort_index()
        results.data = results.data.loc[idx[subjconds, :], :]
        # Bug fix: materialize the filtered groups; on Python 3, filter()
        # returns a lazy iterator that would be exhausted after one pass.
        subjgroup = [s for s in subjgroup if s in subjconds]

    if listconds:
        # make sure its a list
        if type(listconds) is not list:
            listconds = [listconds]
        # slice the data down to the requested list conditions
        idx = pd.IndexSlice
        results.data = results.data.sort_index()
        results.data = results.data.loc[idx[:, listconds], :]

    # convert to tidy format for plotting
    tidy_data = format2tidy(results.data, subjname, listname, subjgroup,
                            analysis=results.analysis, position=results.position)

    if ax is not None:
        kwargs['ax'] = ax

    # dispatch on analysis type
    if results.analysis == 'accuracy':
        ax = plot_acc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'temporal':
        ax = plot_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'fingerprint':
        ax = plot_fingerprint(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'fingerprint_temporal':
        ax = plot_fingerprint_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'spc':
        ax = plot_spc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis in ('pfr', 'pnr'):
        ax = plot_pnr(tidy_data, plot_style, plot_type, listname, subjname,
                      position=results.position, list_length=results.list_length, **kwargs)
    elif results.analysis == 'lagcrp':
        ax = plot_lagcrp(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    else:
        raise ValueError("Did not recognize analysis.")

    # add title
    if title:
        plt.title(title)
    if legend is False:
        try:
            ax.legend_.remove()
        except AttributeError:
            # No legend was drawn (ax.legend_ is None).
            pass
    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    if save_path:
        # Embed TrueType fonts so saved PDFs stay editable.
        mpl.rcParams['pdf.fonttype'] = 42
        plt.savefig(save_path)
    return ax
Return dictionary of Traffic Stats for Network Interfaces.
@return: Nested dictionary of statistics for each interface.
def getIfStats(self):
    """Return dictionary of Traffic Stats for Network Interfaces.

    @return: Nested dictionary of statistics for each interface.
    """
    info_dict = {}
    try:
        # Close the handle even if read() fails; only I/O errors are
        # converted (the original bare except masked unrelated bugs).
        fp = open(ifaceStatsFile, 'r')
        try:
            data = fp.read()
        finally:
            fp.close()
    except IOError:
        raise IOError('Failed reading interface stats from file: %s'
                      % ifaceStatsFile)
    for line in data.splitlines():
        # Interface lines look like "eth0: 123 456 ...".
        mobj = re.match(r'^\s*([\w\d:]+):\s*(.*\S)\s*$', line)
        if mobj:
            iface = mobj.group(1)
            statline = mobj.group(2)
            info_dict[iface] = dict(zip(
                ('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo',
                 'rxframe', 'rxcompressed', 'rxmulticast',
                 'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo',
                 'txcolls', 'txcarrier', 'txcompressed'),
                [int(x) for x in statline.split()]))
    return info_dict
Return dictionary of Interface Configuration (ifconfig).
@return: Dictionary of if configurations keyed by if name.
def getIfConfig(self):
    """Return dictionary of Interface Configuration (ifconfig).

    @return: Dictionary of if configurations keyed by if name.
    """
    conf = {}
    try:
        out = subprocess.Popen([ipCmd, "addr", "show"],
                               stdout=subprocess.PIPE).communicate()[0]
    except OSError:
        # Command missing or not executable.
        raise Exception('Execution of command %s failed.' % ipCmd)
    for line in out.splitlines():
        # Interface header, e.g. "2: eth0: <BROADCAST,...> mtu 1500 ..."
        mobj = re.match(r'^\d+: (\S+):\s+<(\S*)>\s+(\S.*\S)\s*$', line)
        if mobj:
            iface = mobj.group(1)
            conf[iface] = {}
            continue
        # Link layer line, e.g. "    link/ether aa:bb:cc:dd:ee:ff brd ..."
        mobj = re.match(r'^\s{4}link\/(.*\S)\s*$', line)
        if mobj:
            arr = mobj.group(1).split()
            if len(arr) > 0:
                conf[iface]['type'] = arr[0]
            if len(arr) > 1:
                conf[iface]['hwaddr'] = arr[1]
            continue
        # Address line, e.g. "    inet 10.0.0.1/24 brd 10.0.0.255 ..."
        mobj = re.match(r'^\s+(inet|inet6)\s+([\d\.\:A-Za-z]+)\/(\d+)($|\s+.*\S)\s*$',
                        line)
        if mobj:
            proto = mobj.group(1)
            # 'in' replaces dict.has_key(), removed on Python 3.
            if proto not in conf[iface]:
                conf[iface][proto] = []
            addrinfo = {}
            addrinfo['addr'] = mobj.group(2).lower()
            addrinfo['mask'] = int(mobj.group(3))
            arr = mobj.group(4).split()
            if len(arr) > 0 and arr[0] == 'brd':
                addrinfo['brd'] = arr[1]
            conf[iface][proto].append(addrinfo)
            continue
    return conf
Get routing table.
@return: List of routes.
def getRoutes(self):
    """Get routing table.

    @return: List of routes; each route is a dict keyed by the
             lower-cased column headers of the `route -n` output.
    """
    routes = []
    try:
        out = subprocess.Popen([routeCmd, "-n"],
                               stdout=subprocess.PIPE).communicate()[0]
    except OSError:
        # Bug fix: report the command actually executed (was ipCmd).
        raise Exception('Execution of command %s failed.' % routeCmd)
    lines = out.splitlines()
    if len(lines) > 1:
        # First line is a title; the second holds the column headers.
        headers = [col.lower() for col in lines[1].split()]
        for line in lines[2:]:
            routes.append(dict(zip(headers, line.split())))
    return routes
Execute netstat command with positional params args and return result as
list of lines.
@param *args: Positional params for netstat command.
@return: List of output lines
def execNetstatCmd(self, *args):
    """Execute netstat command with positional params args and return
    result as list of lines.

    @param *args: Positional params for netstat command.
    @return: List of output lines
    """
    out = util.exec_command([netstatCmd,] + list(args))
    return out.splitlines()
Execute netstat command and return result as a nested dictionary.
@param tcp: Include TCP ports in output if True.
@param udp: Include UDP ports in output if True.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param only_listen: Include only listening ports in output if True.
@param show_users: Show info on owning users for ports if True.
@param show_procs: Show info on PID and Program Name attached to
ports if True.
@param resolve_hosts: Resolve IP addresses into names if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param resolve_users: Resolve numeric user IDs to user names if True.
@return: List of headers and list of rows and columns.
def parseNetstatCmd(self, tcp=True, udp=True, ipv4=True, ipv6=True,
                    include_listen=True, only_listen=False,
                    show_users=False, show_procs=False,
                    resolve_hosts=False, resolve_ports=False,
                    resolve_users=True):
    """Execute netstat command and return result as a nested dictionary.

    @param tcp: Include TCP ports in output if True.
    @param udp: Include UDP ports in output if True.
    @param ipv4: Include IPv4 ports in output if True.
    @param ipv6: Include IPv6 ports in output if True.
    @param include_listen: Include listening ports in output if True.
    @param only_listen: Include only listening ports in output if True.
    @param show_users: Show info on owning users for ports if True.
    @param show_procs: Show info on PID and Program Name attached to
                       ports if True.
    @param resolve_hosts: Resolve IP addresses into names if True.
    @param resolve_ports: Resolve numeric ports to names if True.
    @param resolve_users: Resolve numeric user IDs to user names if True.
    @return: Dict with 'headers' (list of column names) and
             'stats' (list of rows).
    """
    headers = ['proto', 'ipversion', 'recvq', 'sendq',
               'localaddr', 'localport', 'foreignaddr', 'foreignport',
               'state']
    args = []
    proto = []
    if ipv4:
        proto.append('inet')
    if ipv6:
        proto.append('inet6')
    if len(proto) > 0:
        args.append('-A')
        args.append(','.join(proto))
    if tcp:
        args.append('-t')
    if udp:
        args.append('-u')
    if only_listen:
        args.append('-l')
    elif include_listen:
        args.append('-a')
    # Raw strings so the backslash escapes reach the regex engine verbatim.
    regexp_str = (r'(tcp|udp)(\d*)\s+(\d+)\s+(\d+)\s+'
                  r'(\S+):(\w+)\s+(\S+):(\w+|\*)\s+(\w*)')
    if show_users:
        args.append('-e')
        regexp_str += r'\s+(\w+)\s+(\d+)'
        headers.extend(['user', 'inode'])
    if show_procs:
        args.append('-p')
        regexp_str += r'\s+(\S+)'
        headers.extend(['pid', 'prog'])
    if not resolve_hosts:
        args.append('--numeric-hosts')
    if not resolve_ports:
        args.append('--numeric-ports')
    if not resolve_users:
        args.append('--numeric-users')
    lines = self.execNetstatCmd(*args)
    stats = []
    regexp = re.compile(regexp_str)
    # Skip the two netstat header lines.
    for line in lines[2:]:
        mobj = regexp.match(line)
        if mobj is not None:
            stat = list(mobj.groups())
            # Normalize an ip version suffix of '0' to '4'.
            if stat[1] == '0':
                stat[1] = '4'
            # Empty state column (e.g. for UDP) becomes None.
            if stat[8] == '':
                stat[8] = None
            if show_procs:
                # Split trailing 'PID/Program' column into two fields.
                proc = stat.pop().split('/')
                if len(proc) == 2:
                    stat.extend(proc)
                else:
                    stat.extend([None, None])
            stats.append(stat)
    return {'headers': headers, 'stats': stats}
Execute netstat command and return result as a nested dictionary.
@param tcp: Include TCP ports in output if True.
@param udp: Include UDP ports in output if True.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param only_listen: Include only listening ports in output if True.
@param show_users: Show info on owning users for ports if True.
@param show_procs: Show info on PID and Program Name attached to
ports if True.
@param resolve_hosts: Resolve IP addresses into names if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param resolve_users: Resolve numeric user IDs to user names if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: List of headers and list of rows and columns.
def getStats(self, tcp=True, udp=True, ipv4=True, ipv6=True,
             include_listen=True, only_listen=False,
             show_users=False, show_procs=False,
             resolve_hosts=False, resolve_ports=False, resolve_users=True,
             **kwargs):
    """Execute netstat command and return result as a nested dictionary.

    @param tcp: Include TCP ports in output if True.
    @param udp: Include UDP ports in output if True.
    @param ipv4: Include IPv4 ports in output if True.
    @param ipv6: Include IPv6 ports in output if True.
    @param include_listen: Include listening ports in output if True.
    @param only_listen: Include only listening ports in output if True.
    @param show_users: Show info on owning users for ports if True.
    @param show_procs: Show info on PID and Program Name attached to
                       ports if True.
    @param resolve_hosts: Resolve IP addresses into names if True.
    @param resolve_ports: Resolve numeric ports to names if True.
    @param resolve_users: Resolve numeric user IDs to user names if True.
    @param **kwargs: Keyword arguments filter the result rows by column
                     value; each keyword is a field name with an optional
                     suffix (_ic, _regex, _ic_regex) selecting exact,
                     case-insensitive, regex, or case-insensitive regex
                     matching against a value or list of values.
    @return: Dict with 'headers' and 'stats' keys, or None if the
             netstat output could not be parsed.
    """
    parsed = self.parseNetstatCmd(tcp, udp, ipv4, ipv6,
                                  include_listen, only_listen,
                                  show_users, show_procs,
                                  resolve_hosts, resolve_ports,
                                  resolve_users)
    if not parsed:
        return None
    if not kwargs:
        # No filters requested; hand back the parsed result as-is.
        return parsed
    table_filter = util.TableFilter()
    table_filter.registerFilters(**kwargs)
    filtered_rows = table_filter.applyFilters(parsed['headers'],
                                              parsed['stats'])
    return {'headers': parsed['headers'], 'stats': filtered_rows}
Returns the number of TCP endpoints discriminated by status.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping connection status to the
number of endpoints.
def getTCPportConnStatus(self, ipv4=True, ipv6=True, include_listen=False,
                         **kwargs):
    """Returns the number of TCP endpoints discriminated by status.

    @param ipv4: Include IPv4 ports in output if True.
    @param ipv6: Include IPv6 ports in output if True.
    @param include_listen: Include listening ports in output if True.
    @param **kwargs: Keyword arguments filter the result rows by column
                     value; each keyword is a field name with an optional
                     suffix (_ic, _regex, _ic_regex) selecting exact,
                     case-insensitive, regex, or case-insensitive regex
                     matching against a value or list of values.
    @return: Dictionary mapping connection status to the
             number of endpoints.
    """
    result = self.getStats(tcp=True, udp=False,
                           include_listen=include_listen,
                           ipv4=ipv4, ipv6=ipv6,
                           **kwargs)
    counts = {}
    for row in result['stats']:
        if row is None:
            continue
        # Column 8 is the connection state (e.g. ESTABLISHED).
        state = row[8].lower()
        counts[state] = counts.get(state, 0) + 1
    return counts
Returns TCP connection counts for each local port.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping port number or name to the
number of established connections.
def getTCPportConnCount(self, ipv4=True, ipv6=True, resolve_ports=False,
                        **kwargs):
    """Returns TCP connection counts for each local port.

    @param ipv4: Include IPv4 ports in output if True.
    @param ipv6: Include IPv6 ports in output if True.
    @param resolve_ports: Resolve numeric ports to names if True.
    @param **kwargs: Keyword arguments filter the result rows by column
                     value; each keyword is a field name with an optional
                     suffix (_ic, _regex, _ic_regex) selecting exact,
                     case-insensitive, regex, or case-insensitive regex
                     matching against a value or list of values.
    @return: Dictionary mapping port number or name to the
             number of established connections.
    """
    port_dict = {}
    result = self.getStats(tcp=True, udp=False,
                           include_listen=False, ipv4=ipv4,
                           ipv6=ipv6, resolve_ports=resolve_ports,
                           **kwargs)
    stats = result['stats']
    for stat in stats:
        if stat[8] == 'ESTABLISHED':
            # Bug fix: the original read port_dict.get(5, 0) — the literal
            # key 5 instead of the local port stat[5] — so counts for a
            # port never accumulated past 1.
            port_dict[stat[5]] = port_dict.get(stat[5], 0) + 1
    return port_dict
Computes proportion of words recalled
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prop_recalled : numpy array
proportion of words recalled
def accuracy_helper(egg, match='exact', distance='euclidean',
                    features=None):
    """
    Computes proportion of words recalled

    Parameters
    ----------
    egg : quail.Egg
        Data to analyze
    match : str (exact, best or smooth)
        Matching approach to compute recall matrix. If exact, the presented and
        recalled items must be identical (default). If best, the recalled item
        that is most similar to the presented items will be selected. If smooth,
        a weighted average of all presented items will be used, where the
        weights are derived from the similarity between the recalled item and
        each presented item.
    distance : str
        The distance function used to compare presented and recalled items.
        Applies only to 'best' and 'smooth' matching approaches. Can be any
        distance function supported by scipy.spatial.distance.cdist.

    Returns
    ----------
    prop_recalled : numpy array
        proportion of words recalled
    """
    def acc(lst):
        # Fraction of list positions recalled: count unique non-negative
        # entries (valid recalls) relative to the list length.
        return len([i for i in np.unique(lst) if i >= 0]) / (egg.list_length)

    opts = dict(match=match, distance=distance, features=features)
    # '==' replaces 'is' for string comparison; identity comparison relies
    # on CPython string interning and can fail for dynamic strings.
    if match == 'exact':
        opts.update({'features': 'item'})
    recmat = recall_matrix(egg, **opts)
    if match in ['exact', 'best']:
        result = [acc(lst) for lst in recmat]
    elif match == 'smooth':
        result = np.mean(recmat, axis=1)
    else:
        raise ValueError('Match must be set to exact, best or smooth.')
    return np.nanmean(result, axis=0)
Establish connection to PostgreSQL Database.
def _connect(self):
    """Establish connection to PostgreSQL Database."""
    # Connect with explicit parameters if configured, otherwise let
    # libpq fall back to its defaults via an empty DSN.
    params = self._connParams
    self._conn = psycopg2.connect(**params) if params else psycopg2.connect('')
    try:
        ver_str = self._conn.get_parameter_status('server_version')
    except AttributeError:
        # Connection object lacks get_parameter_status; ask the server.
        ver_str = self.getParam('server_version')
    self._version = util.SoftwareVersion(ver_str)
Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
def _createStatsDict(self, headers, rows):
"""Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
"""
dbstats = {}
for row in rows:
dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
return dbstats |
Utility method that returns totals for database statistics.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Dictionary of totals for each statistics column.
def _createTotalsDict(self, headers, rows):
"""Utility method that returns totals for database statistics.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Dictionary of totals for each statistics column.
"""
totals = [sum(col) for col in zip(*rows)[1:]]
return dict(zip(headers[1:], totals)) |
Executes simple query which returns a single column.
@param query: Query string.
@return: Query result string.
def _simpleQuery(self, query):
    """Executes simple query which returns a single column.

    @param query: Query string.
    @return: Query result string.
    """
    cursor = self._conn.cursor()
    cursor.execute(query)
    # Only the first column of the first row is of interest.
    first_col = cursor.fetchone()[0]
    return util.parse_value(first_col)
Returns value of Run-time Database Parameter 'key'.
@param key: Run-time parameter name.
@return: Run-time parameter value.
def getParam(self, key):
    """Returns value of Run-time Database Parameter 'key'.

    @param key: Run-time parameter name.
    @return: Run-time parameter value.
    """
    cursor = self._conn.cursor()
    # SHOW does not accept bind parameters, so the name is interpolated
    # into the statement.  NOTE(review): key must come from trusted code,
    # never from user input.
    cursor.execute("SHOW %s" % key)
    result_row = cursor.fetchone()
    return util.parse_value(result_row[0])
Returns dictionary with number of connections for each database.
@return: Dictionary of database connection statistics.
def getConnectionStats(self):
    """Returns dictionary with number of connections for each database.

    @return: Dictionary of database connection statistics.
    """
    cursor = self._conn.cursor()
    cursor.execute("""SELECT datname,numbackends FROM pg_stat_database;""")
    result = cursor.fetchall()
    # Empty result set maps to an empty dictionary.
    return dict(result) if result else {}
Returns database block read, transaction and tuple stats for each
database.
@return: Nested dictionary of stats.
def getDatabaseStats(self):
    """Returns database block read, transaction and tuple stats for each
    database.

    @return: Nested dictionary of stats with 'databases' (per-database)
             and 'totals' (summed across databases) keys.
    """
    headers = ('datname', 'numbackends', 'xact_commit', 'xact_rollback',
               'blks_read', 'blks_hit', 'tup_returned', 'tup_fetched',
               'tup_inserted', 'tup_updated', 'tup_deleted', 'disk_size')
    cursor = self._conn.cursor()
    # Last header (disk_size) is computed via pg_database_size().
    cursor.execute("SELECT %s, pg_database_size(datname) FROM pg_stat_database;"
                   % ",".join(headers[:-1]))
    rows = cursor.fetchall()
    return {'databases': self._createStatsDict(headers, rows),
            'totals': self._createTotalsDict(headers, rows)}
Returns the number of active locks, discriminated by lock mode.
@return: Dictionary of stats. |
def getLockStatsMode(self):
    """Return counts of active locks grouped by lock mode.

    @return: Dictionary with 'all' and 'wait' lock counts per mode.
    """
    # Two independent zero-initialized counters, one per category.
    info_dict = {'all': dict.fromkeys(self.lockModes, 0),
                 'wait': dict.fromkeys(self.lockModes, 0)}
    cur = self._conn.cursor()
    cur.execute("SELECT TRIM(mode, 'Lock'), granted, COUNT(*) FROM pg_locks "
                "GROUP BY TRIM(mode, 'Lock'), granted;")
    for (mode, granted, cnt) in cur.fetchall():
        info_dict['all'][mode] += cnt
        # Locks that have not been granted are still being waited on.
        if not granted:
            info_dict['wait'][mode] += cnt
    return info_dict
Returns the number of active locks, discriminated by database.
@return: Dictionary of stats. |
def getLockStatsDB(self):
    """Return counts of active locks grouped by database.

    @return: Dictionary with 'all' and 'wait' lock counts per database.
    """
    all_locks = {}
    waiting = {}
    cur = self._conn.cursor()
    cur.execute("SELECT d.datname, l.granted, COUNT(*) FROM pg_database d "
                "JOIN pg_locks l ON d.oid=l.database "
                "GROUP BY d.datname, l.granted;")
    for (db, granted, cnt) in cur.fetchall():
        all_locks[db] = all_locks.get(db, 0) + cnt
        # Ungranted locks are still being waited on.
        if not granted:
            waiting[db] = waiting.get(db, 0) + cnt
    return {'all': all_locks, 'wait': waiting}
Returns Global Background Writer and Checkpoint Activity stats.
@return: Nested dictionary of stats.
def getBgWriterStats(self):
    """Return global background writer and checkpoint activity stats.

    @return: Dictionary of stats; empty for PostgreSQL < 8.3, which has
        no pg_stat_bgwriter view.
    """
    if not self.checkVersion('8.3'):
        return {}
    cur = self._conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("SELECT * FROM pg_stat_bgwriter")
    return cur.fetchone()
Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
def getXlogStatus(self):
    """Return transaction logging or recovery status.

    For servers in recovery mode (PostgreSQL 9.0+) the replication
    receive/replay positions are returned (plus replay timestamp and
    pause state on 9.1+); otherwise the current xlog write position and
    file name are returned.

    @return: Dictionary of status items.
    """
    inRecovery = None
    if self.checkVersion('9.0'):
        inRecovery = self._simpleQuery("SELECT pg_is_in_recovery();")
    cur = self._conn.cursor()
    if inRecovery:
        cols = ['pg_last_xlog_receive_location()',
                'pg_last_xlog_replay_location()',]
        headers = ['xlog_receive_location',
                   'xlog_replay_location',]
        if self.checkVersion('9.1'):
            cols.extend(['pg_last_xact_replay_timestamp()',
                         'pg_is_xlog_replay_paused()',])
            headers.extend(['xact_replay_timestamp',
                            'xlog_replay_paused',])
        cur.execute("""SELECT %s;""" % ','.join(cols))
        # BUG FIX: headers was previously reset to a 2-tuple here, which
        # discarded the 9.1+ labels and silently dropped the replay
        # timestamp / pause state from the returned dictionary.
    else:
        cur.execute("""SELECT
            pg_current_xlog_location(),
            pg_xlogfile_name(pg_current_xlog_location());""")
        headers = ('xlog_location', 'xlog_filename')
    row = cur.fetchone()
    info_dict = dict(zip(headers, row))
    if inRecovery is not None:
        info_dict['in_recovery'] = inRecovery
    return info_dict
Returns status of replication slaves.
@return: Dictionary of status items.
def getSlaveStatus(self):
    """Return replication status of attached slave servers.

    @return: Dictionary keyed by slave process id, or None for
        PostgreSQL versions without pg_stat_replication (< 9.1).
    """
    if not self.checkVersion('9.1'):
        return None
    cols = ['procpid', 'usename', 'application_name',
            'client_addr', 'client_port', 'backend_start', 'state',
            'sent_location', 'write_location', 'flush_location',
            'replay_location', 'sync_priority', 'sync_state',]
    cur = self._conn.cursor()
    cur.execute("""SELECT %s FROM pg_stat_replication;"""
                % ','.join(cols))
    # Key each entry by the slave's process id (first column).
    return dict((row[0], dict(zip(cols[1:], row[1:])))
                for row in cur.fetchall())
Establish connection to MySQL Database.
def _connect(self):
    """Establish connection to MySQL Database."""
    # Fall back to library default connection settings when no
    # connection parameters have been configured.
    if not self._connParams:
        self._conn = MySQLdb.connect('')
    else:
        self._conn = MySQLdb.connect(**self._connParams)
Returns list of supported storage engines.
@return: List of storage engine names.
def getStorageEngines(self):
    """Return the list of supported storage engines.

    @return: List of lower-cased names of engines that are enabled
        ('YES') or the server default ('DEFAULT').
    """
    cur = self._conn.cursor()
    cur.execute("""SHOW STORAGE ENGINES;""")
    # An empty result set naturally yields an empty list, so no
    # special-casing is needed; membership test against a tuple avoids
    # rebuilding a list on every iteration.
    return [row[0].lower() for row in cur.fetchall()
            if row[1] in ('YES', 'DEFAULT')]
Returns value of Run-time Database Parameter 'key'.
@param key: Run-time parameter name.
@return: Run-time parameter value.
def getParam(self, key):
    """Return the value of the global run-time variable *key*.

    @param key: Run-time parameter name.
    @return: Run-time parameter value as an integer, or None if no
        variable matches the requested name.
    """
    cur = self._conn.cursor()
    # Bound parameters must be passed as a sequence per DB-API 2.0.
    cur.execute("SHOW GLOBAL VARIABLES LIKE %s", (key,))
    row = cur.fetchone()
    if row is None:
        # No matching variable; previously this raised TypeError on row[1].
        return None
    return int(row[1])
Returns dictionary of all run-time parameters.
@return: Dictionary of all Run-time parameters.
def getParams(self):
    """Return a dictionary of all global run-time variables.

    @return: Dictionary mapping parameter name to parsed value.
    """
    cur = self._conn.cursor()
    cur.execute("SHOW GLOBAL VARIABLES")
    return dict((row[0], util.parse_value(row[1]))
                for row in cur.fetchall())
Returns number of processes discriminated by state.
@return: Dictionary mapping process state to number of processes.
def getProcessStatus(self):
    """Return the number of processes grouped by state.

    @return: Dictionary mapping process state to number of processes.
    """
    info_dict = {}
    cur = self._conn.cursor()
    cur.execute("""SHOW FULL PROCESSLIST;""")
    for row in (cur.fetchall() or ()):
        raw_state = row[6]
        if raw_state == '':
            state = 'idle'
        elif raw_state is None:
            state = 'other'
        else:
            # Normalize e.g. 'Sending data' -> 'sending_data'.
            state = str(raw_state).replace(' ', '_').lower()
        info_dict[state] = info_dict.get(state, 0) + 1
    return info_dict
Returns number of processes discriminated by database name.
@return: Dictionary mapping database name to number of processes.
def getProcessDatabase(self):
    """Return the number of processes grouped by database name.

    @return: Dictionary mapping database name to number of processes.
    """
    counts = {}
    cur = self._conn.cursor()
    cur.execute("""SHOW FULL PROCESSLIST;""")
    rows = cur.fetchall()
    if rows:
        for row in rows:
            # Column 3 of the process list is the database name.
            dbname = row[3]
            counts[dbname] = counts.get(dbname, 0) + 1
    return counts
Returns list of databases.
@return: List of databases.
def getDatabases(self):
    """Return the list of database names.

    @return: List of databases.
    """
    cur = self._conn.cursor()
    cur.execute("""SHOW DATABASES;""")
    # An empty result set naturally produces an empty list, so the
    # previous if/else special-casing was redundant.
    return [row[0] for row in cur.fetchall()]
Computes probability of a word being recalled (in the appropriate recall list), given its presentation position
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
prec : numpy array
each number represents the probability of recall for a word presented in given position/index
def spc_helper(egg, match='exact', distance='euclidean',
               features=None):
    """
    Computes probability of a word being recalled (in the appropriate
    recall list), given its presentation position.

    Parameters
    ----------
    egg : quail.Egg
        Data to analyze

    match : str (exact, best or smooth)
        Matching approach to compute recall matrix.  If exact, the
        presented and recalled items must be identical (default).  If
        best, the recalled item that is most similar to the presented
        items will be selected.  If smooth, a weighted average of all
        presented items will be used, where the weights are derived from
        the similarity between the recalled item and each presented item.

    distance : str
        The distance function used to compare presented and recalled
        items.  Applies only to 'best' and 'smooth' matching approaches.
        Can be any distance function supported by
        numpy.spatial.distance.cdist.

    Returns
    ----------
    prec : numpy array
        Each number represents the probability of recall for a word
        presented in the given position/index.
    """
    def spc(lst):
        # Mark each presented position that was recalled with a 1;
        # recall positions are 1-based, hence the inds-1 shift.
        d = np.zeros_like(egg.pres.values[0])
        inds = np.array(lst[~np.isnan(lst)]).astype(int)
        d[inds-1] = 1
        return d

    opts = dict(match=match, distance=distance, features=features)
    # BUG FIX: `match is 'exact'` compared string identity, which relies
    # on CPython interning; use equality instead.
    if match == 'exact':
        opts.update({'features' : 'item'})
    recmat = recall_matrix(egg, **opts)

    if match in ['exact', 'best']:
        result = [spc(lst) for lst in recmat]
    elif match == 'smooth':
        result = np.nanmean(recmat, 2)
    else:
        raise ValueError('Match must be set to exact, best or smooth.')

    return np.mean(result, 0)
Query Apache Tomcat Server Status Page in XML format and return
the result as an ElementTree object.
@return: ElementTree object of Status Page XML.
def _retrieve(self):
    """Fetch the Apache Tomcat Server Status Page in XML format.

    @return: ElementTree object of Status Page XML.
    """
    url = "%s://%s:%d/manager/status" % (self._proto, self._host, self._port)
    # Request the XML representation instead of the HTML page.
    params = {'XML': 'true'}
    response = util.get_url(url, self._user, self._password, params)
    return ElementTree.XML(response)
Return JVM Memory Stats for Apache Tomcat Server.
@return: Dictionary of memory utilization stats.
def getMemoryStats(self):
    """Return JVM Memory Stats for Apache Tomcat Server.

    @return: Dictionary of memory utilization stats.
    """
    # Lazily fetch the status page on first use.
    if self._statusxml is None:
        self.initStats()
    memstats = {}
    node = self._statusxml.find('jvm/memory')
    if node is not None:
        for (attr, raw) in node.items():
            memstats[attr] = util.parse_value(raw)
    return memstats
Return dictionary of Connector Stats for Apache Tomcat Server.
@return: Nested dictionary of Connector Stats.
def getConnectorStats(self):
    """Return dictionary of Connector Stats for Apache Tomcat Server.

    @return: Nested dictionary of connector stats keyed by port number.
    """
    # Lazily fetch the status page on first use.
    if self._statusxml is None:
        self.initStats()
    connstats = {}
    for connnode in self._statusxml.findall('connector'):
        namestr = connnode.get('name')
        if namestr is None:
            continue
        # Connector names look like 'proto-port', e.g. 'http-8080'.
        mobj = re.match('(.*)-(\d+)', namestr)
        if not mobj:
            continue
        port = int(mobj.group(2))
        connstats[port] = {'proto': mobj.group(1)}
        for tag in ('threadInfo', 'requestInfo'):
            node = connnode.find(tag)
            if node is None:
                continue
            stats = {}
            for (key, val) in node.items():
                # Attributes ending in 'Time' are reported in
                # milliseconds; convert them to seconds.
                if re.search('Time$', key):
                    stats[key] = float(val) / 1000.0
                else:
                    stats[key] = util.parse_value(val)
            if stats:
                connstats[port][tag] = stats
    return connstats
Loads eggs, fried eggs and example data
Parameters
----------
filepath : str
Location of file
update : bool
If true, updates egg to latest format
Returns
----------
data : quail.Egg or quail.FriedEgg
Data loaded from disk
def load(filepath, update=True):
    """
    Loads eggs, fried eggs and example data

    Parameters
    ----------
    filepath : str
        Location of the file, or one of the bundled dataset names
        'automatic' (alias 'example'), 'manual' or 'naturalistic'.
    update : bool
        If true, updates egg to latest format

    Returns
    ----------
    data : quail.Egg or quail.FriedEgg
        Data loaded from disk
    """
    base = os.path.dirname(os.path.abspath(__file__))
    if filepath == 'automatic' or filepath == 'example':
        return load_egg(base + '/data/automatic.egg')
    elif filepath == 'manual':
        return load_egg(base + '/data/manual.egg', update=False)
    elif filepath == 'naturalistic':
        # BUG FIX: previously the path was computed but never loaded or
        # returned, so load('naturalistic') silently returned None.
        return load_egg(base + '/data/naturalistic.egg')
    elif filepath.split('.')[-1]=='egg':
        return load_egg(filepath, update=update)
    elif filepath.split('.')[-1]=='fegg':
        return load_fegg(filepath, update=False)
    else:
        raise ValueError('Could not load file.')
Loads pickled egg
Parameters
----------
filepath : str
Location of pickled egg
update : bool
If true, updates egg to latest format
Returns
----------
egg : Egg data object
A loaded unpickled egg
def load_fegg(filepath, update=True):
    """
    Loads a pickled fried egg (analysis result) from disk.

    Parameters
    ----------
    filepath : str
        Location of the pickled fried egg.
    update : bool
        If true, updates the egg to the latest format via ``crack()``.

    Returns
    ----------
    egg : FriedEgg data object
        The loaded, unpickled fried egg.
    """
    try:
        # Current format: an hdf5 file of keyword arguments for FriedEgg.
        egg = FriedEgg(**dd.io.load(filepath))
    except ValueError as e:
        print(e)
        # if error, try loading old format
        # NOTE(review): pickle.load on untrusted files can execute
        # arbitrary code — only load fried eggs from trusted sources.
        with open(filepath, 'rb') as f:
            egg = pickle.load(f)
    if update:
        return egg.crack()
    else:
        return egg
Loads pickled egg
Parameters
----------
filepath : str
Location of pickled egg
update : bool
If true, updates egg to latest format
Returns
----------
egg : Egg data object
A loaded unpickled egg
def load_egg(filepath, update=True):
    """
    Loads a pickled egg from disk.

    Parameters
    ----------
    filepath : str
        Location of the pickled egg.
    update : bool
        If true, updates the egg to the latest format via ``crack()``,
        preserving any existing metadata.

    Returns
    ----------
    egg : Egg data object
        The loaded, unpickled egg.
    """
    try:
        # Current format: an hdf5 file of keyword arguments for Egg.
        egg = Egg(**dd.io.load(filepath))
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  If loading the current format
        # fails, fall back to the legacy pickled format.
        # NOTE(review): pickle.load on untrusted files can execute
        # arbitrary code — only load eggs from trusted sources.
        with open(filepath, 'rb') as f:
            egg = pickle.load(f)
    if update:
        if egg.meta:
            # crack() resets metadata, so stash and restore it.
            old_meta = egg.meta
            egg.crack()
            egg.meta = old_meta
            return egg
        else:
            return egg.crack()
    else:
        return egg
Function that loads sql files generated by autoFR Experiment
def loadEL(dbpath=None, recpath=None, remove_subs=None, wordpool=None, groupby=None, experiments=None,
           filters=None):
    '''
    Function that loads sql files generated by autoFR Experiment

    Parameters
    ----------
    dbpath : str or list of str
        Path(s) to the psiTurk sqlite db file(s).
    recpath : str
        Folder containing the transcribed recall (.wav.txt) files.
    remove_subs : list of str
        Subject ids to exclude from loading.
    wordpool : str
        Path to the wordpool csv file.
    groupby : dict
        Optional {'exp_version': [...]} grouping of experiment versions.
    experiments : list of str
        Experiment versions (codeversions) to load.
    filters : list of callables
        Extra dataframe filters applied after the built-in ones.

    Returns
    ----------
    eggs : quail.Egg or list of quail.Egg
        One egg per groupby entry (a single egg if there is one group).
    '''
    assert (dbpath is not None), "You must specify a db file or files."
    assert (recpath is not None), "You must specify a recall folder."
    assert (wordpool is not None), "You must specify a wordpool file."
    assert (experiments is not None), "You must specify a list of experiments"
    ############################################################################
    # subfunctions #############################################################
    def db2df(db, filter_func=None):
        '''
        Loads db file and converts to dataframe
        '''
        db_url = "sqlite:///" + db
        table_name = 'turkdemo'
        data_column_name = 'datastring'
        # boilerplace sqlalchemy setup
        engine = create_engine(db_url)
        metadata = MetaData()
        metadata.bind = engine
        table = Table(table_name, metadata, autoload=True)
        # make a query and loop through
        s = table.select()
        rows = s.execute()
        data = []
        for row in rows:
            data.append(row[data_column_name])
        # parse each participant's datastring as json object
        # and take the 'data' sub-object
        data = [json.loads(part)['data'] for part in data if part is not None]
        # insert uniqueid field into trialdata in case it wasn't added
        # in experiment:
        for part in data:
            for record in part:
                if type(record['trialdata']) is list:
                    record['trialdata'] = {record['trialdata'][0]:record['trialdata'][1]}
                record['trialdata']['uniqueid'] = record['uniqueid']
        # flatten nested list so we just have a list of the trialdata recorded
        # each time psiturk.recordTrialData(trialdata) was called.
        def isNotNumber(s):
            try:
                float(s)
                return False
            except ValueError:
                return True
        data = [record['trialdata'] for part in data for record in part]
        # filter out fields that we dont want using isNotNumber function
        filtered_data = [{k:v for (k,v) in list(part.items()) if isNotNumber(k)} for part in data]
        # Put all subjects' trial data into a dataframe object from the
        # 'pandas' python library: one option among many for analysis
        data_frame = pd.DataFrame(filtered_data)
        # second pass over the table to map each uniqueid to its codeversion
        data_column_name = 'codeversion'
        engine = create_engine(db_url)
        metadata = MetaData()
        metadata.bind = engine
        table = Table(table_name, metadata, autoload=True)
        s = table.select()
        rows = s.execute()
        version_dict = {}
        for row in rows:
            version_dict[row[0]]=row[data_column_name]
        version_col = []
        for idx,sub in enumerate(data_frame['uniqueid'].unique()):
            for i in range(sum(data_frame['uniqueid']==sub)):
                version_col.append(version_dict[sub])
        data_frame['exp_version']=version_col
        if filter_func:
            for idx,filt in enumerate(filter_func):
                data_frame = filt(data_frame)
        return data_frame
    # custom filter to clean db file: keep only rows whose survey answer
    # names a known experimenter
    def experimenter_filter(data_frame):
        indexes=[]
        for line in data_frame.iterrows():
            # BUG FIX: `delete` was previously left unbound on the first
            # row (or stale from a prior row) when parsing raised; default
            # to keeping rows whose responses cannot be parsed.
            delete = False
            try:
                if json.loads(line[1]['responses'])['Q1'].lower() in ['kirsten','allison','allison\nallison','marisol', 'marisol ','marisiol', 'maddy','campbell', 'campbell field', 'kirsten\nkirsten', 'emily', 'bryan', 'armando', 'armando ortiz',
                                                                     'maddy/lucy','paxton', 'lucy','campbell\ncampbell','madison','darya','rachael']:
                    delete = False
                else:
                    delete = True
            except Exception:
                pass
            if delete:
                indexes.append(line[0])
        return data_frame.drop(indexes)
    def adaptive_filter(data_frame):
        # NOTE(review): both branches set delete to False, so this filter
        # currently drops nothing; kept as-is pending clarification.
        indexes=[]
        for line in data_frame.iterrows():
            delete = False
            try:
                if 'Q2' in json.loads(line[1]['responses']):
                    delete = False
                else:
                    delete = False
            except Exception:
                pass
            if delete:
                indexes.append(line[0])
        return data_frame.drop(indexes)
    # keep only rows belonging to the requested experiment versions
    def experiments_filter(data_frame):
        indexes=[]
        for line in data_frame.iterrows():
            delete = False
            try:
                if line[1]['exp_version'] in experiments:
                    delete = False
                else:
                    delete = True
            except Exception:
                pass
            if delete:
                indexes.append(line[0])
        return data_frame.drop(indexes)
    # this function takes the data frame and returns subject specific data based on the subid variable
    def filterData(data_frame,subid):
        filtered_stim_data = data_frame[data_frame['stimulus'].notnull() & data_frame['listNumber'].notnull()]
        filtered_stim_data = filtered_stim_data[filtered_stim_data['trial_type']=='single-stim']
        filtered_stim_data = filtered_stim_data[filtered_stim_data['uniqueid']==subid]
        return filtered_stim_data
    # parse the html stimulus markup of each row into a feature dictionary
    def createStimDict(data):
        stimDict = []
        for index, row in data.iterrows():
            try:
                stimDict.append({
                    'text': str(re.findall('>(.+)<',row['stimulus'])[0]),
                    'color' : { 'r' : int(re.findall('rgb\((.+)\)',row['stimulus'])[0].split(',')[0]),
                                'g' : int(re.findall('rgb\((.+)\)',row['stimulus'])[0].split(',')[1]),
                                'b' : int(re.findall('rgb\((.+)\)',row['stimulus'])[0].split(',')[2])
                                },
                    'location' : {
                        'top': float(re.findall('top:(.+)\%;', row['stimulus'])[0]),
                        'left' : float(re.findall('left:(.+)\%', row['stimulus'])[0])
                    },
                    'category' : wordpool['CATEGORY'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
                    'size' : wordpool['SIZE'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
                    'wordLength' : len(str(re.findall('>(.+)<',row['stimulus'])[0])),
                    'firstLetter' : str(re.findall('>(.+)<',row['stimulus'])[0])[0],
                    'listnum' : row['listNumber']
                })
            except Exception:
                # stimuli without color/location markup fall back to defaults
                stimDict.append({
                    'text': str(re.findall('>(.+)<',row['stimulus'])[0]),
                    'color' : { 'r' : 0,
                                'g' : 0,
                                'b' : 0
                                },
                    'location' : {
                        'top': 50,
                        'left' : 50
                    },
                    'category' : wordpool['CATEGORY'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
                    'size' : wordpool['SIZE'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
                    'wordLength' : len(str(re.findall('>(.+)<',row['stimulus'])[0])),
                    'firstLetter' : str(re.findall('>(.+)<',row['stimulus'])[0])[0],
                    'listnum' : row['listNumber']
                })
        return stimDict
    # this function loads in the recall data into an array of arrays, where each array represents a list of words
    def loadRecallData(subid):
        recalledWords = []
        for i in range(0,16):
            try:
                f = open(recpath + subid + '/' + subid + '-' + str(i) + '.wav.txt', 'r')
                spamreader = csv.reader(f, delimiter=',', quotechar='|')
            except (IOError, OSError) as e:
                try:
                    f = open(recpath + subid + '-' + str(i) + '.wav.txt', 'r')
                    spamreader = csv.reader(f, delimiter=',', quotechar='|')
                except (IOError, OSError) as e:
                    print(e)
            try:
                words=[]
                altformat=True
                for row in spamreader:
                    # a multi-column row means the whole list is on one line
                    if len(row)>1:
                        recalledWords.append(row)
                        altformat=False
                        break
                    else:
                        try:
                            words.append(row[0])
                        except Exception:
                            pass
                if altformat:
                    recalledWords.append(words)
            except Exception:
                print('couldnt process '+ recpath + subid + '/' + subid + '-' + str(i) + '.wav.txt')
        return recalledWords
    # this function computes accuracy for a series of lists
    def computeListAcc(stimDict,recalledWords):
        accVec = []
        for i in range(0,16):
            stim = [stim['text'] for stim in stimDict if stim['listnum']==i]
            recalled= recalledWords[i]
            acc = 0
            tmpstim = stim[:]
            for word in recalled:
                if word in tmpstim:
                    tmpstim.remove(word)
                    acc+=1
            accVec.append(acc/len(stim))
        return accVec
    # flatten color/location into lists and chunk the stims into lists of 16
    def getFeatures(stimDict):
        stimDict_copy = stimDict[:]
        for item in stimDict_copy:
            item['location'] = [item['location']['top'], item['location']['left']]
            item['color'] = [item['color']['r'], item['color']['g'], item['color']['b']]
            item.pop('text', None)
            item.pop('listnum', None)
        stimDict_copy = [stimDict_copy[i:i+16] for i in range(0, len(stimDict_copy), 16)]
        return stimDict_copy
    ############################################################################
    # main program #############################################################
    # if its not a list, make it one
    if type(dbpath) is not list:
        dbpath = [dbpath]
    # read in stimulus library
    wordpool = pd.read_csv(wordpool)
    # add custom filters
    if filters:
        # BUG FIX: was `experimeter_filter` (typo), which raised NameError
        # whenever custom filters were supplied.
        filter_func = [adaptive_filter, experimenter_filter, experiments_filter] + filters
    else:
        filter_func = [adaptive_filter, experimenter_filter, experiments_filter]
    # load in dbs and convert to df, and filter
    dfs = [db2df(db, filter_func=filter_func) for db in dbpath]
    # concatenate the db files
    df = pd.concat(dfs)
    # subjects who have completed the exp
    subids = list(df[df['listNumber']==15]['uniqueid'].unique())
    # remove problematic subjects
    if remove_subs:
        for sub in remove_subs:
            try:
                subids.remove(sub)
            except ValueError:
                print('Could not find subject: ' + sub + ', skipping...')
    # set up data structure to load in subjects
    if groupby:
        pres = [[] for i in range(len(groupby['exp_version']))]
        rec = [[] for i in range(len(groupby['exp_version']))]
        features = [[] for i in range(len(groupby['exp_version']))]
        subs = [[] for i in range(len(groupby['exp_version']))]
        # make each groupby item a list
        groupby = [exp if type(exp) is list else [exp] for exp in groupby['exp_version']]
    else:
        pres = [[]]
        rec = [[]]
        features = [[]]
        subs = [[]]
    # for each subject that completed the experiment
    for idx,sub in enumerate(subids):
        # get the subjects data
        filteredStimData = filterData(df,sub)
        if filteredStimData['exp_version'].values[0] in experiments:
            # create stim dict
            stimDict = createStimDict(filteredStimData)
            sub_data = pd.DataFrame(stimDict)
            sub_data['subject']=idx
            sub_data['experiment']=filteredStimData['exp_version'].values[0]
            sub_data = sub_data[['experiment','subject','listnum','text','category','color','location','firstLetter','size','wordLength']]
            # get features from stim dict
            feats = getFeatures(stimDict)
            # load in the recall data
            recalledWords = loadRecallData(sub)
            # get experiment version
            exp_version = filteredStimData['exp_version'].values[0]
            # find the idx of the experiment for this subjects
            if groupby:
                exp_idx = list(np.where([exp_version in item for item in groupby])[0])
            else:
                exp_idx = [0]
            if exp_idx != []:
                pres[exp_idx[0]].append([list(sub_data[sub_data['listnum']==lst]['text'].values) for lst in sub_data['listnum'].unique()])
                rec[exp_idx[0]].append(recalledWords)
                features[exp_idx[0]].append(feats)
                subs[exp_idx[0]].append(sub)
    eggs = [Egg(pres=ipres, rec=irec, features=ifeatures, meta={'ids' : isub}) for ipres,irec,ifeatures,isub in zip(pres, rec, features, subs)]
    # map feature dictionaries in pres df to rec df.
    # NOTE: relies on late binding -- `stim_dict` is rebound inside the
    # per-subject loop below before checkword is applied.
    def checkword(x):
        if x is None:
            return x
        else:
            try:
                return stim_dict[x['item']]
            except Exception:
                return x
    # convert utf-8 bytes type to string
    def update_types(egg):
        featlist = list(egg.pres.loc[0].loc[0].values.tolist()[0].keys())
        def update1df(df):
            for sub in range(egg.n_subjects):
                for liszt in range(egg.n_lists):
                    for item in range(len(df.loc[sub].loc[liszt].values.tolist())):
                        for feat in featlist:
                            if feat in df.loc[sub].loc[liszt].values.tolist()[item].keys():
                                if isinstance(df.loc[sub].loc[liszt].values.tolist()[item][feat], np.bytes_):
                                    try:
                                        df.loc[sub].loc[liszt].values.tolist()[item][feat] = str(df.loc[sub].loc[liszt].values.tolist()[item][feat], 'utf-8')
                                    except Exception:
                                        print("Subject " + str(sub) + ", list " + str(liszt) + ", item " + str(item) + ", feature " + str(feat) + ": Could not convert type " + str(type(egg.rec.loc[sub].loc[liszt].values.tolist()[item][feat])) + " to string.")
        update1df(egg.pres)
        update1df(egg.rec)
    for egg in eggs:
        update_types(egg)
        old_meta = egg.meta
        temp_eggs = [egg]
        for i in range(egg.n_subjects):
            e = egg.crack(subjects=[i])
            stim = e.pres.values.ravel()
            stim_dict = {str(x['item']) : {k:v for k, v in iter(x.items())} for x in stim}
            e.rec = e.rec.applymap(lambda x: checkword(x))
            temp_eggs.append(e)
        edited_egg = stack_eggs(temp_eggs)
        mapped_egg = edited_egg.crack(subjects=[i for i in range(egg.n_subjects,egg.n_subjects*2)])
        mapped_egg.meta = old_meta
        eggs[eggs.index(egg)] = mapped_egg
    if len(eggs)>1:
        return eggs
    else:
        return eggs[0]
Loads example data
The automatic and manual example data are eggs containing 30 subjects who completed a free
recall experiment as described here: https://psyarxiv.com/psh48/. The subjects
studied 8 lists of 16 words each and then performed a free recall test.
The naturalistic example data is an egg containing 17 subjects who viewed and verbally
recounted an episode of the BBC series Sherlock, as described here:
https://www.nature.com/articles/nn.4450. We fit a topic model to hand-annotated
text-descriptions of scenes from the video and used the model to transform both the
scene descriptions and manual transcriptions of each subject's verbal recall. We then
used a Hidden Markov Model to segment the video model and the recall models, by subject,
into k events.
Parameters
----------
dataset : str
The dataset to load. Can be 'automatic', 'manual', or 'naturalistic'. The free recall
audio recordings for the 'automatic' dataset was transcribed by Google
Cloud Speech and the 'manual' dataset was transcribed by humans. The 'naturalistic'
dataset was transcribed by humans and transformed as described above.
Returns
----------
data : quail.Egg
Example data
def load_example_data(dataset='automatic'):
    """
    Loads example data

    The automatic and manual example data are eggs containing 30 subjects
    who completed a free recall experiment as described here:
    https://psyarxiv.com/psh48/.  The subjects studied 8 lists of 16 words
    each and then performed a free recall test.

    The naturalistic example data is an egg containing 17 subjects who
    viewed and verbally recounted an episode of the BBC series Sherlock,
    as described here: https://www.nature.com/articles/nn.4450.  We fit a
    topic model to hand-annotated text-descriptions of scenes from the
    video and used the model to transform both the scene descriptions and
    manual transcriptions of each subject's verbal recall.  We then used
    a Hidden Markov Model to segment the video model and the recall
    models, by subject, into k events.

    Parameters
    ----------
    dataset : str
        The dataset to load.  Can be 'automatic', 'manual', or
        'naturalistic'.  The free recall audio recordings for the
        'automatic' dataset was transcribed by Google Cloud Speech and
        the 'manual' dataset was transcribed by humans.  The
        'naturalistic' dataset was transcribed by humans and transformed
        as described above.

    Returns
    ----------
    data : quail.Egg
        Example data
    """
    # can only be auto or manual
    assert dataset in ['automatic', 'manual', 'naturalistic'], "Dataset can only be automatic, manual, or naturalistic"

    fpath = os.path.dirname(os.path.abspath(__file__)) + '/data/' + dataset + '.egg'
    if dataset == 'naturalistic':
        # open naturalistic egg (hdf5 format)
        egg = Egg(**dd.io.load(fpath))
    else:
        # open pickled egg
        try:
            with open(fpath, 'rb') as handle:
                egg = pickle.load(handle)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Fall back to the hdf5 format.
            f = dd.io.load(fpath)
            egg = Egg(pres=f['pres'], rec=f['rec'], dist_funcs=f['dist_funcs'],
                      subjgroup=f['subjgroup'], subjname=f['subjname'],
                      listgroup=f['listgroup'], listname=f['listname'],
                      date_created=f['date_created'])
    return egg.crack()
Retrieve values for graphs.
def retrieveVals(self):
    """Retrieve values for graphs."""
    fs = FSinfo(self._fshost, self._fsport, self._fspass)
    # Only query FreeSWITCH for graphs that are actually enabled.
    for (graph, field, method) in (('fs_calls', 'calls', 'getCallCount'),
                                   ('fs_channels', 'channels', 'getChannelCount')):
        if self.hasGraph(graph):
            self.setGraphVal(graph, field, getattr(fs, method)())
Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise.
def autoconf(self):
    """Implements Munin Plugin Auto-Configuration Option.

    @return: True if plugin can be auto-configured, False otherwise.
    """
    # A successfully constructed FSinfo instance is never None, so this
    # reports True whenever construction succeeds -- presumably the
    # constructor raises when the connection fails (verify in FSinfo).
    return FSinfo(self._fshost, self._fsport, self._fspass) is not None
Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[0]
If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
def spsolve(A, b):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[0]
        If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
    """
    result = UmfpackLU(A).solve(b)
    # scipy.sparse.spsolve flattens the result when b is an (n, 1)
    # column matrix; mirror that quirk for compatibility.
    if b.ndim == 2 and b.shape[1] == 1:
        return result.ravel()
    return result
Solve linear equation A x = b for x
Parameters
----------
b : ndarray
Right-hand side of the matrix equation. Can be vector or a matrix.
Returns
-------
x : ndarray
Solution to the matrix equation
def solve(self, b):
    """
    Solve linear equation A x = b for x

    Parameters
    ----------
    b : ndarray or sparse matrix
        Right-hand side of the matrix equation. Can be vector or a matrix.

    Returns
    -------
    x : ndarray
        Solution to the matrix equation, with the same trailing shape as b.

    Raises
    ------
    ValueError
        If the number of rows of b does not match the number of columns of A.
    """
    # Densify sparse right-hand sides; the per-column solver below
    # operates on dense vectors.
    if isspmatrix(b):
        b = b.toarray()

    if b.shape[0] != self._A.shape[1]:
        raise ValueError("Shape of b is not compatible with that of A")

    # View b as a 2-D (n, k) array so vectors and matrices share one path.
    b_arr = asarray(b, dtype=self._A.dtype).reshape(b.shape[0], -1)
    x = np.zeros((self._A.shape[0], b_arr.shape[1]), dtype=self._A.dtype)

    # Solve one column at a time with the factorized UMFPACK object.
    # NOTE(review): autoTranspose appears to let the wrapper handle the
    # matrix storage orientation transparently -- confirm against the
    # scikit-umfpack documentation.
    for j in range(b_arr.shape[1]):
        x[:,j] = self.umf.solve(UMFPACK_A, self._A, b_arr[:,j], autoTranspose=True)

    # Restore the caller's original trailing shape (vector in, vector out).
    return x.reshape((self._A.shape[0],) + b.shape[1:])
Solve linear equation of the form A X = B. Where B and X are sparse matrices.
Parameters
----------
B : any scipy.sparse matrix
Right-hand side of the matrix equation.
Note: it will be converted to csc_matrix via `.tocsc()`.
Returns
-------
X : csc_matrix
Solution to the matrix equation as a csc_matrix
def solve_sparse(self, B):
    """
    Solve linear equation of the form A X = B. Where B and X are sparse matrices.

    Parameters
    ----------
    B : any scipy.sparse matrix
        Right-hand side of the matrix equation.
        Note: it will be converted to csc_matrix via `.tocsc()`.

    Returns
    -------
    X : csc_matrix
        Solution to the matrix equation as a csc_matrix
    """
    B = B.tocsc()
    cols = list()
    # BUG FIX: `xrange` is Python 2-only and raises NameError on
    # Python 3; `range` is equivalent here.
    for j in range(B.shape[1]):
        # Solve against each column of B independently.
        col = self.solve(B[:,j])
        cols.append(csc_matrix(col))
    return hstack(cols)
Computes recall matrix given list of presented and list of recalled words
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
recall_matrix : list of lists of ints
each integer represents the presentation position of the recalled word in a given list in order of recall
0s represent recalled words not presented
negative ints represent words recalled from previous lists
def recall_matrix(egg, match='exact', distance='euclidean', features=None):
    """
    Computes recall matrix given list of presented and list of recalled words

    Parameters
    ----------
    egg : quail.Egg
        Data to analyze
    match : str (exact, best or smooth)
        Matching approach to compute recall matrix.  If exact, the presented
        and recalled items must be identical (default).  If best, the recalled
        item that is most similar to the presented items will be selected.  If
        smooth, a weighted average of all presented items will be used, where
        the weights are derived from the similarity between the recalled item
        and each presented item.
    distance : str
        The distance function used to compare presented and recalled items.
        Applies only to 'best' and 'smooth' matching approaches.  Can be any
        distance function supported by numpy.spatial.distance.cdist.
    features : str or list of str, optional
        Feature dimensions to use for 'best'/'smooth' matching.  When omitted,
        all non-'item' feature keys of the first presented item are used.

    Returns
    ----------
    recall_matrix : list of lists of ints
        each integer represents the presentation position of the recalled word
        in a given list in order of recall
        0s represent recalled words not presented
        negative ints represent words recalled from previous lists

    Raises
    ------
    ValueError
        If match is 'best' or 'smooth' and no feature dimensions can be found.
    """
    if match in ['best', 'smooth']:
        if not features:
            features = [k for k,v in egg.pres.loc[0][0].values[0].items() if k!='item']
        if not features:
            # BUGFIX: `raise('...')` raised a plain string, which is a
            # TypeError in Python 3; raise a proper exception type instead.
            raise ValueError('No features found. Cannot match with best or smooth strategy')
        if not isinstance(features, list):
            features = [features]
    if match=='exact':
        features=['item']
        return _recmat_exact(egg.pres, egg.rec, features)
    else:
        return _recmat_smooth(egg.pres, egg.rec, features, distance, match)
Retrieve values for graphs.
def retrieveVals(self):
    """Retrieve values for graphs."""
    net_info = NetstatInfo()
    if self.hasGraph('netstat_conn_status'):
        conn_status = net_info.getTCPportConnStatus(include_listen=True)
        # Publish one value per TCP connection state; missing states are 0.
        status_fields = ('listen', 'established', 'syn_sent', 'syn_recv',
                         'fin_wait1', 'fin_wait2', 'time_wait',
                         'close', 'close_wait', 'last_ack', 'closing',
                         'unknown',)
        for fname in status_fields:
            self.setGraphVal('netstat_conn_status', fname,
                             conn_status.get(fname, 0))
    if self.hasGraph('netstat_conn_server'):
        port_counts = net_info.getTCPportConnCount(localport=self._port_list)
        for srv in self._srv_list:
            # A service may listen on several ports; total them up.
            numconn = sum(port_counts.get(port, 0)
                          for port in self._srv_dict[srv])
            self.setGraphVal('netstat_conn_server', srv, numconn)
Parses FreePBX configuration file /etc/amportal for user and password
for Asterisk Manager Interface.
@return: True if configuration file is found and parsed successfully.
def _parseFreePBXconf(self):
    """Parses FreePBX configuration file /etc/amportal for user and password
    for Asterisk Manager Interface.
    @return: True if configuration file is found and parsed successfully.
    @raise IOError: If the configuration file exists but cannot be read.
    """
    amiuser = None
    amipass = None
    if os.path.isfile(confFileFreePBX):
        try:
            fp = open(confFileFreePBX, 'r')
            try:
                data = fp.read()
            finally:
                # Ensure the handle is released even if read() fails.
                fp.close()
        except (IOError, OSError):
            # Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt / SystemExit.
            raise IOError('Failed reading FreePBX configuration file: %s'
                          % confFileFreePBX)
        # AMPMGRUSER / AMPMGRPASS lines hold the AMI credentials.
        for (key, val) in re.findall('^(AMPMGR\w+)\s*=\s*(\S+)\s*$',
                                     data, re.MULTILINE):
            if key == 'AMPMGRUSER':
                amiuser = val
            elif key == 'AMPMGRPASS':
                amipass = val
        if amiuser and amipass:
            self._amiuser = amiuser
            self._amipass = amipass
            return True
    return False
Parses Asterisk configuration file /etc/asterisk/manager.conf for
user and password for Manager Interface. Returns True on success.
@return: True if configuration file is found and parsed successfully.
def _parseAsteriskConf(self):
    """Parses Asterisk configuration file /etc/asterisk/manager.conf for
    user and password for Manager Interface. Returns True on success.
    @return: True if configuration file is found and parsed successfully.
    @raise IOError: If the configuration file exists but cannot be read.
    """
    if os.path.isfile(confFileAMI):
        try:
            fp = open(confFileAMI, 'r')
            try:
                data = fp.read()
            finally:
                # Ensure the handle is released even if read() fails.
                fp.close()
        except (IOError, OSError):
            # Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt / SystemExit.
            raise IOError('Failed reading Asterisk configuration file: %s'
                          % confFileAMI)
        # Match a "[user]\nsecret = ..." stanza; user is the section name.
        mobj = re.search('^\[(\w+)\]\s*\r{0,1}\nsecret\s*=\s*(\S+)\s*$',
                         data, re.MULTILINE)
        if mobj:
            self._amiuser = mobj.group(1)
            self._amipass = mobj.group(2)
            return True
    return False
Connect to Asterisk Manager Interface.
def _connect(self):
    """Connect to Asterisk Manager Interface.
    @raise Exception: If the TCP connection to host:port fails.
    """
    try:
        if sys.version_info[:2] >= (2,6):
            # The timeout argument to Telnet() was added in Python 2.6.
            self._conn = telnetlib.Telnet(self._amihost, self._amiport,
                                          connTimeout)
        else:
            self._conn = telnetlib.Telnet(self._amihost, self._amiport)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt / SystemExit
        # still propagate.
        raise Exception(
            "Connection to Asterisk Manager Interface on "
            "host %s and port %s failed."
            % (self._amihost, self._amiport)
        )
Send action to Asterisk Manager Interface.
@param action: Action name
@param attrs: Tuple of key-value pairs for action attributes.
@param chan_vars: Tuple of key-value pairs for channel variables.
def _sendAction(self, action, attrs=None, chan_vars=None):
    """Send action to Asterisk Manager Interface.

    @param action:    Action name.
    @param attrs:     Tuple of key-value pairs for action attributes.
    @param chan_vars: Tuple of key-value pairs for channel variables.
    """
    # An AMI request is a block of "Key: value" lines terminated by a
    # blank line; lines use CRLF endings.
    self._conn.write("Action: %s\r\n" % action)
    for (key, val) in (attrs or ()):
        self._conn.write("%s: %s\r\n" % (key, val))
    for (key, val) in (chan_vars or ()):
        self._conn.write("Variable: %s=%s\r\n" % (key, val))
    self._conn.write("\r\n")
Read and parse response from Asterisk Manager Interface.
@return: Dictionary with response key-value pairs.
def _getResponse(self):
    """Read and parse response from Asterisk Manager Interface.
    @return: Dictionary with response key-value pairs.
    """
    resp_dict = {}
    # A response block is terminated by an empty line (CRLF CRLF).
    raw = self._conn.read_until("\r\n\r\n", connTimeout)
    for line in raw.split("\r\n"):
        kv = re.match('(\w+):\s*(\S.*)$', line)
        if kv:
            resp_dict[kv.group(1)] = kv.group(2)
            continue
        # Command output ends with an "--END COMMAND--" marker.
        tail = re.match('(.*)--END COMMAND--\s*$', line, flags=re.DOTALL)
        if tail:
            resp_dict['command_response'] = tail.group(1)
    return resp_dict
Read and parse Asterisk Manager Interface Greeting to determine and
set Manager Interface version.
def _getGreeting(self):
    """Read and parse Asterisk Manager Interface Greeting to determine and
    set Manager Interface version.

    @raise Exception: If the greeting banner cannot be parsed.
    """
    banner = self._conn.read_until("\r\n", connTimeout)
    mobj = re.match('Asterisk Call Manager\/([\d\.]+)\s*$', banner)
    if not mobj:
        raise Exception("Asterisk Manager Interface version cannot be determined.")
    self._ami_version = util.SoftwareVersion(mobj.group(1))
Query Asterisk Manager Interface for Asterisk Version to configure
system for compatibility with multiple versions.
CLI Command - core show version
def _initAsteriskVersion(self):
    """Query Asterisk Manager Interface for the Asterisk version, to
    configure the plugin for compatibility with multiple releases.

    CLI Command - core show version

    @raise Exception: If the version string cannot be parsed.
    """
    # The CLI command was renamed after AMI protocol version 1.0.
    if self._ami_version > util.SoftwareVersion('1.0'):
        cmd = "core show version"
    else:
        cmd = "show version"
    cmdresp = self.executeCommand(cmd)
    mobj = re.match('Asterisk\s*(SVN-branch-|\s)(\d+(\.\d+)*)', cmdresp)
    if not mobj:
        raise Exception('Asterisk version cannot be determined.')
    self._asterisk_version = util.SoftwareVersion(mobj.group(2))
Login to Asterisk Manager Interface.
def _login(self):
    """Login to Asterisk Manager Interface.

    @return: True on successful authentication.
    @raise Exception: If authentication fails.
    """
    # Events are turned off: this client only issues commands.
    credentials = (("Username", self._amiuser),
                   ("Secret", self._amipass),
                   ("Events", "off"))
    self._sendAction("login", credentials)
    resp = self._getResponse()
    if resp.get("Response") != "Success":
        raise Exception("Authentication to Asterisk Manager Interface Failed.")
    return True
Send Action to Asterisk Manager Interface to execute CLI Command.
@param command: CLI command to execute.
@return: Command response string.
def executeCommand(self, command):
    """Send Action to Asterisk Manager Interface to execute CLI Command.

    @param command: CLI command to execute.
    @return: Command response string.
    @raise Exception: If the command execution fails.
    """
    self._sendAction("Command", (("Command", command),))
    resp = self._getResponse()
    status = resp.get("Response")
    if status == "Follows":
        # Multi-line output is collected under 'command_response'.
        return resp.get("command_response")
    if status == "Error":
        raise Exception("Execution of Asterisk Manager Interface Command "
                        "(%s) failed with error message: %s" %
                        (command, str(resp.get("Message"))))
    raise Exception("Execution of Asterisk Manager Interface Command "
                    "failed: %s" % command)
Query Asterisk Manager Interface to initialize internal list of
loaded modules.
CLI Command - core show modules
def _initModuleList(self):
    """Query Asterisk Manager Interface to initialize internal set of
    loaded modules.

    CLI Command - core show modules
    """
    # Command name differs between Asterisk releases.
    cmd = "module show" if self.checkVersion('1.4') else "show modules"
    cmdresp = self.executeCommand(cmd)
    self._modules = set()
    # First line is the header, last line the summary; skip both.
    for line in cmdresp.splitlines()[1:-1]:
        mobj = re.match('\s*(\S+)\s', line)
        if mobj:
            self._modules.add(mobj.group(1).lower())
Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
def _initApplicationList(self):
    """Query Asterisk Manager Interface to initialize internal set of
    available applications.

    CLI Command - core show applications
    """
    # Command name differs between Asterisk releases.
    cmd = "core show applications" if self.checkVersion('1.4') else "show applications"
    cmdresp = self.executeCommand(cmd)
    self._applications = set()
    # First line is the header, last line the summary; skip both.
    for line in cmdresp.splitlines()[1:-1]:
        mobj = re.match('\s*(\S+):', line)
        if mobj:
            self._applications.add(mobj.group(1).lower())
Query Asterisk Manager Interface to initialize internal list of
supported channel types.
CLI Command - core show channeltypes
def _initChannelTypesList(self):
    """Query Asterisk Manager Interface to initialize internal set of
    supported channel types.

    CLI Command - core show channeltypes
    """
    # Command name differs between Asterisk releases.
    cmd = "core show channeltypes" if self.checkVersion('1.4') else "show channeltypes"
    cmdresp = self.executeCommand(cmd)
    self._chantypes = set()
    # The first two lines are headers; skip them.
    for line in cmdresp.splitlines()[2:]:
        mobj = re.match('\s*(\S+)\s+.*\s+(yes|no)\s+', line)
        if mobj:
            self._chantypes.add(mobj.group(1).lower())
Returns True if mod is among the loaded modules.
@param mod: Module name.
@return: Boolean
def hasModule(self, mod):
    """Return True if mod is among the loaded modules.

    @param mod: Module name.
    @return: Boolean
    """
    if self._modules is None:
        # Lazily populate the module set on first use.
        self._initModuleList()
    return mod in self._modules
Returns True if app is among the loaded modules.
@param app: Module name.
@return: Boolean
def hasApplication(self, app):
    """Return True if app is among the available applications.

    @param app: Application name.
    @return: Boolean
    """
    if self._applications is None:
        # Lazily populate the application set on first use.
        self._initApplicationList()
    return app in self._applications
Returns True if chan is among the supported channel types.
@param app: Module name.
@return: Boolean
def hasChannelType(self, chan):
    """Return True if chan is among the supported channel types.

    @param chan: Channel type name.
    @return: Boolean
    """
    if self._chantypes is None:
        # Lazily populate the channel-type set on first use.
        self._initChannelTypesList()
    return chan in self._chantypes
Query Asterisk Manager Interface for defined codecs.
CLI Command - core show codecs
@return: Dictionary - Short Name -> (Type, Long Name)
def getCodecList(self):
    """Query Asterisk Manager Interface for defined codecs.
    CLI Command - core show codecs
    @return: Dictionary - Short Name -> (Type, Long Name)
    """
    # Command name differs between Asterisk releases.
    if self.checkVersion('1.4'):
        cmd = "core show codecs"
    else:
        cmd = "show codecs"
    cmdresp = self.executeCommand(cmd)
    info_dict = {}
    # Expected row shape: "<id> (<...>) (<...>) <type> <short> (<long>)".
    # Groups: 4=type, 5=short name, 6=long name.
    for line in cmdresp.splitlines():
        mobj = re.match('\s*(\d+)\s+\((.+)\)\s+\((.+)\)\s+(\w+)\s+(\w+)\s+\((.+)\)$',
                        line)
        if mobj:
            info_dict[mobj.group(5)] = (mobj.group(4), mobj.group(6))
    return info_dict
Query Asterisk Manager Interface for Channel Stats.
CLI Command - core show channels
@return: Dictionary of statistics counters for channels.
Number of active channels for each channel type.
def getChannelStats(self, chantypes=('dahdi', 'zap', 'sip', 'iax2', 'local')):
    """Query Asterisk Manager Interface for Channel Stats.
    CLI Command - core show channels
    @param chantypes: Tuple of channel type names to tally.
    @return: Dictionary of statistics counters for channels.
             Number of active channels for each channel type.
    """
    # Command name differs between Asterisk releases.
    if self.checkVersion('1.4'):
        cmd = "core show channels"
    else:
        cmd = "show channels"
    cmdresp = self.executeCommand(cmd)
    info_dict ={}
    # Zap and DAHDI channels are merged under the 'dahdi' counter;
    # pseudo (conference mixing) channels are counted under 'mix'.
    for chanstr in chantypes:
        chan = chanstr.lower()
        if chan in ('zap', 'dahdi'):
            info_dict['dahdi'] = 0
            info_dict['mix'] = 0
        else:
            info_dict[chan] = 0
    for k in ('active_calls', 'active_channels', 'calls_processed'):
        info_dict[k] = 0
    # Matches lines like "SIP/peer-000001 ..." for any requested type.
    regexstr = ('(%s)\/(\w+)' % '|'.join(chantypes))
    for line in cmdresp.splitlines():
        mobj = re.match(regexstr,
                        line, re.IGNORECASE)
        if mobj:
            chan_type = mobj.group(1).lower()
            chan_id = mobj.group(2).lower()
            if chan_type == 'dahdi' or chan_type == 'zap':
                if chan_id == 'pseudo':
                    info_dict['mix'] += 1
                else:
                    info_dict['dahdi'] += 1
            else:
                info_dict[chan_type] += 1
            continue
        # Summary lines, e.g. "5 active channels" / "2 active calls".
        mobj = re.match('(\d+)\s+(active channel|active call|calls processed)',
                        line, re.IGNORECASE)
        if mobj:
            if mobj.group(2) == 'active channel':
                info_dict['active_channels'] = int(mobj.group(1))
            elif mobj.group(2) == 'active call':
                info_dict['active_calls'] = int(mobj.group(1))
            elif mobj.group(2) == 'calls processed':
                info_dict['calls_processed'] = int(mobj.group(1))
            continue
    return info_dict
Query Asterisk Manager Interface for SIP / IAX2 Peer Stats.
CLI Command - sip show peers
iax2 show peers
@param chantype: Must be 'sip' or 'iax2'.
@return: Dictionary of statistics counters for VoIP Peers.
def getPeerStats(self, chantype):
    """Query Asterisk Manager Interface for SIP / IAX2 Peer Stats.
    CLI Command - sip show peers
                  iax2 show peers
    @param chantype: Must be 'sip' or 'iax2'.
    @return: Dictionary of statistics counters for VoIP Peers, or None
             if the channel type is not supported by this server.
    @raise AttributeError: If chantype is neither 'sip' nor 'iax2'.
    """
    chan = chantype.lower()
    # Skip entirely when the channel driver is not loaded.
    if not self.hasChannelType(chan):
        return None
    if chan == 'iax2':
        cmd = "iax2 show peers"
    elif chan == 'sip':
        cmd = "sip show peers"
    else:
        raise AttributeError("Invalid channel type in query for Peer Stats.")
    cmdresp = self.executeCommand(cmd)
    info_dict = dict(
        online = 0, unreachable = 0, lagged = 0,
        unknown = 0, unmonitored = 0)
    for line in cmdresp.splitlines():
        # Reachable peers end with "OK (<latency> ms)".
        if re.search('ok\s+\(\d+\s+ms\)\s*$', line, re.IGNORECASE):
            info_dict['online'] += 1
        else:
            # Otherwise the trailing word names the peer state.
            mobj = re.search('(unreachable|lagged|unknown|unmonitored)\s*$',
                             line, re.IGNORECASE)
            if mobj:
                info_dict[mobj.group(1).lower()] += 1
    return info_dict
Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
CLI Commands - sip show channels
iax2 show channels
@param chantype: Must be 'sip' or 'iax2'.
@param codec_list: List of codec names to parse.
(Codecs not in the list are summed up to the other
count.)
@return: Dictionary of statistics counters for Active VoIP
Channels.
def getVoIPchanStats(self, chantype,
                     codec_list=('ulaw', 'alaw', 'gsm', 'g729')):
    """Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
    CLI Commands - sip show channels
                   iax2 show channels
    @param chantype:   Must be 'sip' or 'iax2'.
    @param codec_list: List of codec names to parse.
                       (Codecs not in the list are summed up to the other
                       count.)
    @return: Dictionary of statistics counters for Active VoIP
             Channels, or None if the channel type is not supported.
    @raise AttributeError: If chantype is neither 'sip' nor 'iax2'.
    """
    chan = chantype.lower()
    if not self.hasChannelType(chan):
        return None
    if chan == 'iax2':
        cmd = "iax2 show channels"
    elif chan == 'sip':
        cmd = "sip show channels"
    else:
        raise AttributeError("Invalid channel type in query for Channel Stats.")
    cmdresp = self.executeCommand(cmd)
    lines = cmdresp.splitlines()
    # Locate the codec column from the header; its title varies between
    # releases ('Format' vs 'Form').
    headers = re.split('\s\s+', lines[0])
    try:
        idx = headers.index('Format')
    except ValueError:
        try:
            idx = headers.index('Form')
        except:
            raise Exception("Error in parsing header line of %s channel stats."
                            % chan)
    # 'other' collects unlisted codecs; 'none' channels without media.
    codec_list = tuple(codec_list) + ('other', 'none')
    info_dict = dict([(k,0) for k in codec_list])
    # Skip header (first) and summary (last) lines.
    for line in lines[1:-1]:
        codec = None
        cols = re.split('\s\s+', line)
        colcodec = cols[idx]
        # Column is either "0x<mask> (<name>)" or a bare codec name.
        mobj = re.match('0x\w+\s\((\w+)\)$', colcodec)
        if mobj:
            codec = mobj.group(1).lower()
        elif re.match('\w+$', colcodec):
            codec = colcodec.lower()
        if codec:
            if codec in info_dict:
                info_dict[codec] += 1
            elif codec == 'nothing' or codec[0:4] == 'unkn':
                info_dict['none'] += 1
            else:
                info_dict['other'] += 1
    return info_dict
Query Asterisk Manager Interface for Conference Room Stats.
CLI Command - meetme list
@return: Dictionary of statistics counters for Conference Rooms.
def getConferenceStats(self):
    """Query Asterisk Manager Interface for Conference Room Stats.
    CLI Command - meetme list
    @return: Dictionary of statistics counters for Conference Rooms,
             or None if MeetMe conferencing is not available.
    """
    if not self.hasConference():
        return None
    # Command name differs between Asterisk releases.
    if self.checkVersion('1.6'):
        cmd = "meetme list"
    else:
        cmd = "meetme"
    cmdresp = self.executeCommand(cmd)
    info_dict = dict(active_conferences = 0, conference_users = 0)
    for line in cmdresp.splitlines():
        # NOTE(review): the pattern assumes the participant count is
        # zero-padded (e.g. "0003"); the literal '0' consumes the leading
        # zero before capturing the count — verify against meetme output.
        mobj = re.match('\w+\s+0(\d+)\s', line)
        if mobj:
            info_dict['active_conferences'] += 1
            info_dict['conference_users'] += int(mobj.group(1))
    return info_dict
Query Asterisk Manager Interface for Voicemail Stats.
CLI Command - voicemail show users
@return: Dictionary of statistics counters for Voicemail Accounts.
def getVoicemailStats(self):
    """Query Asterisk Manager Interface for Voicemail Stats.
    CLI Command - voicemail show users
    @return: Dictionary of statistics counters for Voicemail Accounts,
             or None if voicemail is not available.
    """
    if not self.hasVoicemail():
        return None
    # Command name differs between Asterisk releases.
    if self.checkVersion('1.4'):
        cmd = "voicemail show users"
    else:
        cmd = "show voicemail users"
    cmdresp = self.executeCommand(cmd)
    info_dict = dict(accounts = 0, avg_messages = 0, max_messages = 0,
                     total_messages = 0)
    for line in cmdresp.splitlines():
        # Account rows end with the number of new messages.
        mobj = re.match('\w+\s+\w+\s+.*\s+(\d+)\s*$', line)
        if mobj:
            msgs = int(mobj.group(1))
            info_dict['accounts'] += 1
            info_dict['total_messages'] += msgs
            if msgs > info_dict['max_messages']:
                info_dict['max_messages'] = msgs
    # Guard against division by zero when no accounts are configured.
    if info_dict['accounts'] > 0:
        info_dict['avg_messages'] = (float(info_dict['total_messages'])
                                     / info_dict['accounts'])
    return info_dict
Query Asterisk Manager Interface for Trunk Stats.
CLI Command - core show channels
@param trunkList: List of tuples of one of the two following types:
(Trunk Name, Regular Expression)
(Trunk Name, Regular Expression, MIN, MAX)
@return: Dictionary of trunk utilization statistics.
def getTrunkStats(self, trunkList):
    """Query Asterisk Manager Interface for Trunk Stats.
    CLI Command - core show channels
    @param trunkList: List of tuples of one of the two following types:
                      (Trunk Name, Regular Expression)
                      (Trunk Name, Regular Expression, MIN, MAX)
                      In the 4-tuple form the regex must define a named
                      group 'num'; a channel is counted only when
                      MIN <= num <= MAX.
    @return: Dictionary of trunk utilization statistics.
    """
    re_list = []
    info_dict = {}
    # Pre-compile one matcher per trunk; counters start at zero.
    for filt in trunkList:
        info_dict[filt[0]] = 0
        re_list.append(re.compile(filt[1], re.IGNORECASE))
    # Command name differs between Asterisk releases.
    if self.checkVersion('1.4'):
        cmd = "core show channels"
    else:
        cmd = "show channels"
    cmdresp = self.executeCommand(cmd)
    for line in cmdresp.splitlines():
        for idx in range(len(re_list)):
            recomp = re_list[idx]
            trunkid = trunkList[idx][0]
            mobj = recomp.match(line)
            if mobj:
                if len(trunkList[idx]) == 2:
                    # Simple form: every matching channel counts.
                    info_dict[trunkid] += 1
                    continue
                elif len(trunkList[idx]) == 4:
                    # Range form: count only when the captured channel
                    # number lies within [MIN, MAX].
                    num = mobj.groupdict().get('num')
                    if num is not None:
                        (vmin,vmax) = trunkList[idx][2:4]
                        if int(num) >= int(vmin) and int(num) <= int(vmax):
                            info_dict[trunkid] += 1
                            continue
    return info_dict
Query Asterisk Manager Interface for Queue Stats.
CLI Command: queue show
@return: Dictionary of queue stats.
def getQueueStats(self):
    """Query Asterisk Manager Interface for Queue Stats.
    CLI Command: queue show
    @return: Dictionary of queue stats keyed by queue name, or None if
             queueing is not available.  Each entry holds queue length,
             strategy, call counters, SLA figures and member-state counts.
    @raise AttributeError: If an unrecognized member state is seen.
    """
    if not self.hasQueue():
        return None
    info_dict = {}
    # Command name differs between Asterisk releases.
    if self.checkVersion('1.4'):
        cmd = "queue show"
    else:
        cmd = "show queues"
    cmdresp = self.executeCommand(cmd)
    queue = None
    ctxt = None
    member_states = ("unknown", "not in use", "in use", "busy", "invalid",
                     "unavailable", "ringing", "ring+inuse", "on hold",
                     "total")
    # Template counter dict, copied for each queue's member stats.
    member_state_dict = dict([(k.lower().replace(' ', '_'),0)
                              for k in member_states])
    for line in cmdresp.splitlines():
        # Queue header line, e.g.:
        # "q1 has 3 calls (max unlimited) in 'ringall' strategy
        #  (10s holdtime), W:0, C:5, A:2, SL:80.0% within 60s"
        mobj = re.match(r"([\w\-]+)\s+has\s+(\d+)\s+calls\s+"
                        r"\(max (\d+|unlimited)\)\s+in\s+'(\w+)'\s+strategy\s+"
                        r"\((.+)\),\s+W:(\d+),\s+C:(\d+),\s+A:(\d+),\s+"
                        r"SL:([\d\.]+)%\s+within\s+(\d+)s", line)
        if mobj:
            ctxt = None
            queue = mobj.group(1)
            info_dict[queue] = {}
            info_dict[queue]['queue_len'] = int(mobj.group(2))
            try:
                info_dict[queue]['queue_maxlen'] = int(mobj.group(3))
            except ValueError:
                # "unlimited" maps to None.
                info_dict[queue]['queue_maxlen'] = None
            info_dict[queue]['strategy'] = mobj.group(4)
            # Parse averaged times like "10s holdtime, 5s talktime".
            for tkn in mobj.group(5).split(','):
                mobjx = re.match(r"\s*(\d+)s\s+(\w+)\s*", tkn)
                if mobjx:
                    info_dict[queue]['avg_' + mobjx.group(2)] = int(mobjx.group(1))
            info_dict[queue]['queue_weight'] = int(mobj.group(6))
            info_dict[queue]['calls_completed'] = int(mobj.group(7))
            info_dict[queue]['calls_abandoned'] = int(mobj.group(8))
            info_dict[queue]['sla_pcent'] = float(mobj.group(9))
            info_dict[queue]['sla_cutoff'] = int(mobj.group(10))
            info_dict[queue]['members'] = member_state_dict.copy()
            continue
        # Section marker: subsequent lines list Members or Callers.
        mobj = re.match('\s+(Members|Callers):\s*$', line)
        if mobj:
            ctxt = mobj.group(1).lower()
            continue
        if ctxt == 'members':
            # Member line carries its state in parentheses.
            mobj = re.match(r"\s+\S.*\s\((.*)\)\s+has\s+taken.*calls", line)
            if mobj:
                info_dict[queue]['members']['total'] += 1
                state = mobj.group(1).lower().replace(' ', '_')
                if info_dict[queue]['members'].has_key(state):
                    info_dict[queue]['members'][state] += 1
                else:
                    raise AttributeError("Undefined queue member state %s"
                                         % state)
            continue
    return info_dict
Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show stats
@return: Dictionary of fax stats.
def getFaxStatsCounters(self):
    """Query Asterisk Manager Interface for Fax Stats.
    CLI Command - fax show stats
    @return: Nested dictionary of fax counters keyed by section name
             (e.g. 'general'), or None if fax support is unavailable.
    """
    if not self.hasFax():
        return None
    info_dict = {}
    cmdresp = self.executeCommand('fax show stats')
    ctxt = 'general'
    # Output is a sequence of blank-line separated sections; the first
    # section (the banner) is skipped.  Each section begins with its
    # title line, followed by "name : value" counter lines.
    for section in cmdresp.strip().split('\n\n')[1:]:
        i = 0
        for line in section.splitlines():
            mobj = re.match('(\S.*\S)\s*:\s*(\d+)\s*$', line)
            if mobj:
                if not info_dict.has_key(ctxt):
                    info_dict[ctxt] = {}
                info_dict[ctxt][mobj.group(1).lower()] = int(mobj.group(2).lower())
            elif i == 0:
                # First non-counter line of a section is its title.
                ctxt = line.strip().lower()
            i += 1
    return info_dict
Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show sessions
@return: Dictionary of fax stats.
def getFaxStatsSessions(self):
    """Query Asterisk Manager Interface for Fax Session Stats.
    CLI Command - fax show sessions
    @return: Dictionary of fax session counters ('total' plus per-'type',
             per-'operation' and per-'state' sub-dictionaries), or None
             if fax support is unavailable.
    """
    if not self.hasFax():
        return None
    info_dict = {}
    info_dict['total'] = 0
    fax_types = ('g.711', 't.38')
    fax_operations = ('send', 'recv')
    fax_states = ('uninitialized', 'initialized', 'open',
                  'active', 'inactive', 'complete', 'unknown',)
    info_dict['type'] = dict([(k,0) for k in fax_types])
    info_dict['operation'] = dict([(k,0) for k in fax_operations])
    info_dict['state'] = dict([(k,0) for k in fax_states])
    cmdresp = self.executeCommand('fax show sessions')
    sections = cmdresp.strip().split('\n\n')
    if len(sections) >= 3:
        # BUGFIX: the original iterated sections[1][1:], i.e. the section
        # STRING character by character, so no "line" ever split into 7
        # columns and every counter stayed at zero.  Iterate the lines of
        # the session table instead, skipping its header line.
        for line in sections[1].splitlines()[1:]:
            cols = re.split('\s\s+', line)
            if len(cols) == 7:
                info_dict['total'] += 1
                if cols[3].lower() in fax_types:
                    info_dict['type'][cols[3].lower()] += 1
                if cols[4] == 'receive':
                    info_dict['operation']['recv'] += 1
                elif cols[4] == 'send':
                    info_dict['operation']['send'] += 1
                if cols[5].lower() in fax_states:
                    info_dict['state'][cols[5].lower()] += 1
    return info_dict
Retrieve values for graphs.
def retrieveVals(self):
    """Retrieve values for graphs."""
    for iface in self._ifaceList:
        # Only report interfaces that were requested (None = all).
        if self._reqIfaceList is None or iface in self._reqIfaceList:
            if (self.graphEnabled('wanpipe_traffic')
                or self.graphEnabled('wanpipe_errors')):
                stats = self._ifaceStats.get(iface)
                if stats:
                    graph_name = 'wanpipe_traffic_%s' % iface
                    if self.hasGraph(graph_name):
                        for field in ('rxpackets', 'txpackets'):
                            self.setGraphVal(graph_name, field,
                                             stats.get(field))
                    graph_name = 'wanpipe_errors_%s' % iface
                    if self.hasGraph(graph_name):
                        for field in ('rxerrs', 'txerrs', 'rxframe', 'txcarrier',
                                      'rxdrop', 'txdrop', 'rxfifo', 'txfifo'):
                            self.setGraphVal(graph_name, field,
                                             stats.get(field))
            if (self.graphEnabled('wanpipe_pri_errors')
                or self.graphEnabled('wanpipe_rxlevel')):
                # PRI stats may be unavailable for non-PRI ports; treat
                # any failure as "no stats" (best-effort).
                try:
                    stats = self._wanpipeInfo.getPRIstats(iface)
                except:
                    stats = None
                if stats:
                    graph_name = 'wanpipe_pri_errors_%s' % iface
                    if self.hasGraph(graph_name):
                        for field in ('linecodeviolation',
                                      'farendblockerrors',
                                      'crc4errors', 'faserrors'):
                            self.setGraphVal(graph_name, field,
                                             stats.get(field))
                    # NOTE(review): the hasGraph check uses 'wanpipe_rxlevel'
                    # but the value is written to 'wanpipe_pri_rxlevel' —
                    # these names look inconsistent; verify which graph name
                    # is actually registered.
                    if self.hasGraph('wanpipe_rxlevel'):
                        self.setGraphVal('wanpipe_pri_rxlevel',
                                         iface, stats.get('rxlevel'))
A function to simulate a list
def simulate_list(nwords=16, nrec=10, ncats=4):
    """A function to simulate a list.

    NOTE(review): nwords, nrec and ncats are currently unused — the list
    size is hard-coded to 16 below.  The function also has no return
    statement (returns None), so it appears truncated; verify against the
    original source.
    """
    # load wordpool
    wp = pd.read_csv('data/cut_wordpool.csv')
    # get one list: pick a random GROUP and sample 16 of its words
    wp = wp[wp['GROUP']==np.random.choice(list(range(16)), 1)[0]].sample(16)
    # assign each word a random RGB color triple
    wp['COLOR'] = [[int(np.random.rand() * 255) for i in range(3)] for i in range(16)]
Retrieve values for graphs.
def retrieveVals(self):
    """Retrieve values for graphs.

    Reads MySQL server status counters and server variables (cached per
    run) and publishes values for every enabled graph.  Storage-engine
    specific graphs are emitted only when the engine is included.
    """
    # Cache status counters and server variables on first access.
    if self._genStats is None:
        self._genStats = self._dbconn.getStats()
    if self._genVars is None:
        self._genVars = self._dbconn.getParams()
    if self.hasGraph('mysql_connections'):
        self.setGraphVal('mysql_connections', 'conn',
                         self._genStats.get('Connections'))
        self.setGraphVal('mysql_connections', 'abort_conn',
                         self._genStats.get('Aborted_connects'))
        self.setGraphVal('mysql_connections', 'abort_client',
                         self._genStats.get('Aborted_clients'))
    if self.hasGraph('mysql_traffic'):
        self.setGraphVal('mysql_traffic', 'rx',
                         self._genStats.get('Bytes_received'))
        self.setGraphVal('mysql_traffic', 'tx',
                         self._genStats.get('Bytes_sent'))
    if self.graphEnabled('mysql_slowqueries'):
        self.setGraphVal('mysql_slowqueries', 'queries',
                         self._genStats.get('Slow_queries'))
    if self.hasGraph('mysql_rowmodifications'):
        self.setGraphVal('mysql_rowmodifications', 'insert',
                         self._genStats.get('Handler_write'))
        self.setGraphVal('mysql_rowmodifications', 'update',
                         self._genStats.get('Handler_update'))
        self.setGraphVal('mysql_rowmodifications', 'delete',
                         self._genStats.get('Handler_delete'))
    if self.hasGraph('mysql_rowreads'):
        for field in self.getGraphFieldList('mysql_rowreads'):
            self.setGraphVal('mysql_rowreads', field,
                             self._genStats.get('Handler_read_%s' % field))
    if self.hasGraph('mysql_tablelocks'):
        self.setGraphVal('mysql_tablelocks', 'waited',
                         self._genStats.get('Table_locks_waited'))
        self.setGraphVal('mysql_tablelocks', 'immediate',
                         self._genStats.get('Table_locks_immediate'))
    if self.hasGraph('mysql_threads'):
        # idle = connected - running; total = connected + cached.
        self.setGraphVal('mysql_threads', 'running',
                         self._genStats.get('Threads_running'))
        self.setGraphVal('mysql_threads', 'idle',
                         self._genStats.get('Threads_connected')
                         - self._genStats.get('Threads_running'))
        self.setGraphVal('mysql_threads', 'cached',
                         self._genStats.get('Threads_cached'))
        self.setGraphVal('mysql_threads', 'total',
                         self._genStats.get('Threads_connected')
                         + self._genStats.get('Threads_cached'))
    if self.hasGraph('mysql_commits_rollbacks'):
        self.setGraphVal('mysql_commits_rollbacks', 'commit',
                         self._genStats.get('Handler_commit'))
        self.setGraphVal('mysql_commits_rollbacks', 'rollback',
                         self._genStats.get('Handler_rollback'))
    if self.hasGraph('mysql_qcache_memory'):
        # Derive used memory from total cache size minus free; report
        # None for both when the server lacks the query cache counters.
        try:
            total = self._genVars['query_cache_size']
            free = self._genStats['Qcache_free_memory']
            used = total - free
        except KeyError:
            free = None
            used = None
        self.setGraphVal('mysql_qcache_memory', 'used', used)
        self.setGraphVal('mysql_qcache_memory', 'free', free)
    if self.hasGraph('mysql_qcache_hits'):
        try:
            hits = self._genStats['Qcache_hits']
            misses = self._genStats['Com_select'] - hits
        except KeyError:
            hits = None
            misses = None
        self.setGraphVal('mysql_qcache_hits', 'hits', hits)
        self.setGraphVal('mysql_qcache_hits', 'misses', misses)
    if self.hasGraph('mysql_qcache_prunes'):
        self.setGraphVal('mysql_qcache_prunes', 'insert',
                         self._genStats.get('Qcache_inserts'))
        self.setGraphVal('mysql_qcache_prunes', 'prune',
                         self._genStats.get('Qcache_lowmem_prunes'))
    if self.hasGraph('mysql_proc_status'):
        self._procStatus = self._dbconn.getProcessStatus()
        if self._procStatus:
            # Unrecognized process states are accumulated under 'unknown'.
            stats = {}
            for field in self.getGraphFieldList('mysql_proc_status'):
                stats[field] = 0
            for (k, v) in self._procStatus.items():
                if stats.has_key(k):
                    stats[k] = v
                else:
                    stats['unknown'] += v
            for (k,v) in stats.items():
                self.setGraphVal('mysql_proc_status', k, v)
    if self.hasGraph('mysql_proc_db'):
        self._procDB = self._dbconn.getProcessDatabase()
        for db in self._dbList:
            self.setGraphVal('mysql_proc_db', db, self._procDB.get(db, 0))
    if self.engineIncluded('myisam'):
        if self.hasGraph('mysql_myisam_key_buffer_util'):
            # Key buffer utilization is derived from block counts times
            # the block size; clean = total - free - dirty.
            try:
                bsize = self._genVars['key_cache_block_size']
                total = self._genVars['key_buffer_size']
                free = self._genStats['Key_blocks_unused'] * bsize
                dirty = self._genStats['Key_blocks_not_flushed'] * bsize
                clean = total - free - dirty
            except KeyError:
                total = None
                free = None
                dirty = None
                clean = None
            for (field,val) in (('dirty', dirty),
                                ('clean', clean),
                                ('free', free),
                                ('total', total)):
                self.setGraphVal('mysql_myisam_key_buffer_util',
                                 field, val)
        if self.hasGraph('mysql_myisam_key_read_reqs'):
            try:
                misses = self._genStats['Key_reads']
                hits = (self._genStats['Key_read_requests']
                        - misses)
            except KeyError:
                misses = None
                hits = None
            self.setGraphVal('mysql_myisam_key_read_reqs', 'disk', misses)
            self.setGraphVal('mysql_myisam_key_read_reqs', 'buffer', hits)
    if self.engineIncluded('innodb'):
        if self.hasGraph('mysql_innodb_buffer_pool_util'):
            # Clean pages are not reported directly: data - dirty.
            self._genStats['Innodb_buffer_pool_pages_clean'] = (
                self._genStats.get('Innodb_buffer_pool_pages_data')
                - self._genStats.get('Innodb_buffer_pool_pages_dirty'))
            page_size = int(self._genStats.get('Innodb_page_size'))
            for field in ('dirty', 'clean', 'misc', 'free', 'total'):
                self.setGraphVal('mysql_innodb_buffer_pool_util',
                                 field,
                                 self._genStats.get('Innodb_buffer_pool_pages_%s'
                                                    % field)
                                 * page_size)
        if self.hasGraph('mysql_innodb_buffer_pool_activity'):
            for field in ('created', 'read', 'written'):
                self.setGraphVal('mysql_innodb_buffer_pool_activity', field,
                                 self._genStats.get('Innodb_pages_%s' % field))
        if self.hasGraph('mysql_innodb_buffer_pool_read_reqs'):
            try:
                misses = self._genStats['Innodb_buffer_pool_reads']
                hits = (self._genStats['Innodb_buffer_pool_read_requests']
                        - misses)
            except KeyError:
                misses = None
                hits = None
            self.setGraphVal('mysql_innodb_buffer_pool_read_reqs', 'disk',
                             misses)
            self.setGraphVal('mysql_innodb_buffer_pool_read_reqs', 'buffer',
                             hits)
        if self.hasGraph('mysql_innodb_row_ops'):
            for field in ('inserted', 'updated', 'deleted', 'read'):
                self.setGraphVal('mysql_innodb_row_ops', field,
                                 self._genStats.get('Innodb_rows_%s' % field))
Utility method to check if a storage engine is included in graphs.
@param name: Name of storage engine.
@return: Returns True if included in graphs, False otherwise.
def engineIncluded(self, name):
    """Utility method to check if a storage engine is included in graphs.

    @param name: Name of storage engine.
    @return: Returns True if included in graphs, False otherwise.
    """
    if self._engines is None:
        # Cache the server's engine list on first use.
        self._engines = self._dbconn.getStorageEngines()
    return self.envCheckFilter('engine', name) and name in self._engines
Get disk space usage.
@return: Dictionary of filesystem space utilization stats for filesystems.
def getSpaceUse(self):
    """Get disk space usage.
    @return: Dictionary of filesystem space utilization stats for
             filesystems, keyed by mount point.
    @raise Exception: If the df command cannot be executed.
    """
    stats = {}
    try:
        out = subprocess.Popen([dfCmd, "-Pk"],
                               stdout=subprocess.PIPE).communicate()[0]
    except OSError:
        # Narrowed from a bare except: only failure to launch df is
        # translated; unrelated errors (and Ctrl-C) propagate unchanged.
        raise Exception('Execution of command %s failed.' % dfCmd)
    # POSIX df -Pk output: Filesystem, 1024-blocks, Used, Available,
    # Capacity, Mounted on.  First line is the header.
    lines = out.splitlines()
    if len(lines) > 1:
        for line in lines[1:]:
            fsstats = {}
            cols = line.split()
            # NOTE(review): mount points containing spaces would break
            # this column split — verify against target systems.
            fsstats['device'] = cols[0]
            fsstats['type'] = self._fstypeDict[cols[5]]
            # df -Pk reports KiB; convert to bytes.
            fsstats['total'] = 1024 * int(cols[1])
            fsstats['inuse'] = 1024 * int(cols[2])
            fsstats['avail'] = 1024 * int(cols[3])
            fsstats['inuse_pcent'] = int(cols[4][:-1])
            stats[cols[5]] = fsstats
    return stats
Retrieve values for graphs.
def retrieveVals(self):
        """Retrieve values for graphs.

        Gathers PostgreSQL statistics and feeds them to every enabled
        Munin graph.  Expensive queries (locks, bgwriter, replication
        conflicts) are run lazily, only when a graph that needs them is
        enabled.  (Python 2 code: uses dict.iteritems().)
        """
        stats = self._dbconn.getDatabaseStats()
        databases = stats.get('databases')
        totals = stats.get('totals')
        if self.hasGraph('pg_connections'):
            # Server-wide connection limit plus per-database backend counts.
            limit = self._dbconn.getParam('max_connections')
            self.setGraphVal('pg_connections', 'max_conn', limit)
            for (db, dbstats) in databases.iteritems():
                if self.dbIncluded(db):
                    self.setGraphVal('pg_connections', db,
                                     dbstats['numbackends'])
            self.setGraphVal('pg_connections', 'total', totals['numbackends'])
        if self.hasGraph('pg_diskspace'):
            for (db, dbstats) in databases.iteritems():
                if self.dbIncluded(db):
                    self.setGraphVal('pg_diskspace', db, dbstats['disk_size'])
            self.setGraphVal('pg_diskspace', 'total', totals['disk_size'])
        if self.hasGraph('pg_blockreads'):
            self.setGraphVal('pg_blockreads', 'blk_hit', totals['blks_hit'])
            self.setGraphVal('pg_blockreads', 'blk_read', totals['blks_read'])
        if self.hasGraph('pg_xact'):
            self.setGraphVal('pg_xact', 'commits', totals['xact_commit'])
            self.setGraphVal('pg_xact', 'rollbacks', totals['xact_rollback'])
        if self.hasGraph('pg_tup_read'):
            self.setGraphVal('pg_tup_read', 'fetch', totals['tup_fetched'])
            self.setGraphVal('pg_tup_read', 'return', totals['tup_returned'])
        if self.hasGraph('pg_tup_write'):
            self.setGraphVal('pg_tup_write', 'delete', totals['tup_deleted'])
            self.setGraphVal('pg_tup_write', 'update', totals['tup_updated'])
            self.setGraphVal('pg_tup_write', 'insert', totals['tup_inserted'])
        # Lock statistics are fetched at most once and shared between the
        # 'all' and 'wait' lock graphs.
        lock_stats = None
        for lock_state in ('all', 'wait',):
            graph_name = "pg_lock_%s" % lock_state
            if self.hasGraph(graph_name):
                if lock_stats is None:
                    lock_stats = self._dbconn.getLockStatsMode()
                # The graph field names below and PgInfo.lockModes are
                # parallel sequences; walk them in lockstep.
                mode_iter = iter(PgInfo.lockModes)
                for mode in ('AccessExcl', 'Excl', 'ShrRwExcl', 'Shr',
                             'ShrUpdExcl', 'RwExcl', 'RwShr', 'AccessShr',):
                    self.setGraphVal(graph_name, mode,
                                     lock_stats[lock_state].get(mode_iter.next()))
        # NOTE: 'stats' is deliberately reused here for the bgwriter
        # statistics, again fetched lazily and shared by the two graphs
        # below; the database stats above are no longer needed under
        # this name.
        stats = None
        if self.hasGraph('pg_checkpoints'):
            if stats is None:
                stats = self._dbconn.getBgWriterStats()
            self.setGraphVal('pg_checkpoints', 'req',
                             stats.get('checkpoints_req'))
            self.setGraphVal('pg_checkpoints', 'timed',
                             stats.get('checkpoints_timed'))
        if self.hasGraph('pg_bgwriter'):
            if stats is None:
                stats = self._dbconn.getBgWriterStats()
            self.setGraphVal('pg_bgwriter', 'backend',
                             stats.get('buffers_backend'))
            self.setGraphVal('pg_bgwriter', 'clean',
                             stats.get('buffers_clean'))
            self.setGraphVal('pg_bgwriter', 'chkpoint',
                             stats.get('buffers_checkpoint'))
        if self._detailGraphs:
            # Per-database detail graphs.
            for (db, dbstats) in databases.iteritems():
                if self.dbIncluded(db):
                    if self.hasGraph('pg_blockread_detail'):
                        self.setGraphVal('pg_blockread_detail', db,
                                         dbstats['blks_hit'] + dbstats['blks_read'])
                    for (graph_name, attr_name) in (
                        ('pg_xact_commit_detail', 'xact_commit'),
                        ('pg_xact_rollback_detail', 'xact_rollback'),
                        ('pg_tup_return_detail', 'tup_returned'),
                        ('pg_tup_fetch_detail', 'tup_fetched'),
                        ('pg_tup_delete_detail', 'tup_deleted'),
                        ('pg_tup_update_detail', 'tup_updated'),
                        ('pg_tup_insert_detail', 'tup_inserted'),
                        ):
                        if self.hasGraph(graph_name):
                            self.setGraphVal(graph_name, db, dbstats[attr_name])
                    # Per-database lock stats, fetched at most once per db.
                    lock_stats_db = None
                    for lock_state in ('all', 'wait',):
                        graph_name = "pg_lock_%s_detail" % lock_state
                        if self.hasGraph(graph_name):
                            if lock_stats_db is None:
                                lock_stats_db = self._dbconn.getLockStatsDB()
                            self.setGraphVal(graph_name, db,
                                             lock_stats_db[lock_state].get(db, 0))
        if self._replGraphs:
            # Streaming-replication (hot standby) conflict statistics.
            repl_stats = self._dbconn.getSlaveConflictStats()
            if self.hasGraph('pg_repl_conflicts'):
                # Field names map onto the pg_stat_database_conflicts
                # 'confl_*' columns.
                for field in self.getGraphFieldList('pg_repl_conflicts'):
                    self.setGraphVal('pg_repl_conflicts', field,
                                     repl_stats['totals'].get("confl_%s" % field))
            if self._detailGraphs and self.hasGraph('pg_repl_conflicts_detail'):
                for (db, dbstats) in repl_stats['databases'].iteritems():
                    if self.dbIncluded(db):
                        self.setGraphVal('pg_repl_conflicts_detail', db,
                                         sum(dbstats.values()))
Connects via a RS-485 to Ethernet adapter.
def connect(self, host, port):
        """Connects via a RS-485 to Ethernet adapter."""
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((host, port))
        # Wrap the socket in buffered binary file objects so the rest of
        # the driver can use plain read()/write() calls.
        self._reader = conn.makefile(mode='rb')
        self._writer = conn.makefile(mode='wb')
Process data; returns when the reader signals EOF.
Callback is notified when any data changes.
def process(self, data_changed_callback):
        """Process data; returns when the reader signals EOF.
        Callback is notified when any data changes.

        Frames are delimited by FRAME_DLE+FRAME_STX ... FRAME_DLE+FRAME_ETX
        and end with a two-byte additive checksum.  Each decoded frame is
        dispatched on its two-byte type; data_changed_callback(self) is
        invoked whenever a tracked value (LED states, pump speed/power,
        temperatures, chlorinator levels, salt level, check-system
        message) changes.
        """
        # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        while True:
            # --- Phase 1: scan for the start-of-frame marker. ---
            byte = self._reader.read(1)
            while True:
                # Search for FRAME_DLE + FRAME_STX
                if not byte:
                    return
                if byte[0] == self.FRAME_DLE:
                    next_byte = self._reader.read(1)
                    if not next_byte:
                        return
                    if next_byte[0] == self.FRAME_STX:
                        break
                    else:
                        # NOTE(review): 'byte' is still the DLE here, so this
                        # keeps consuming bytes after the DLE until an STX is
                        # seen — confirm this resync behavior is intended.
                        continue
                byte = self._reader.read(1)
            # --- Phase 2: read the payload, honoring DLE escaping. ---
            frame = bytearray()
            byte = self._reader.read(1)
            while True:
                if not byte:
                    return
                if byte[0] == self.FRAME_DLE:
                    # Should be FRAME_ETX or 0 according to
                    # the AQ-CO-SERIAL manual
                    next_byte = self._reader.read(1)
                    if not next_byte:
                        return
                    if next_byte[0] == self.FRAME_ETX:
                        break
                    elif next_byte[0] != 0:
                        # Error?
                        pass
                    # A DLE followed by 0 is a stuffed literal DLE: the DLE
                    # is appended below and the 0 is discarded.
                frame.append(byte[0])
                byte = self._reader.read(1)
            # Verify CRC
            # The checksum is a plain sum of all bytes, including the
            # DLE/STX header, stored big-endian in the last two bytes.
            frame_crc = int.from_bytes(frame[-2:], byteorder='big')
            frame = frame[:-2]
            calculated_crc = self.FRAME_DLE + self.FRAME_STX
            for byte in frame:
                calculated_crc += byte
            if frame_crc != calculated_crc:
                _LOGGER.warning('Bad CRC')
                continue
            # --- Phase 3: dispatch on the two-byte frame type. ---
            frame_type = frame[0:2]
            frame = frame[2:]
            if frame_type == self.FRAME_TYPE_KEEP_ALIVE:
                # Keep alive
                # If a frame has been queued for transmit, send it.
                if not self._send_queue.empty():
                    data = self._send_queue.get(block=False)
                    self._writer.write(data['frame'])
                    self._writer.flush()
                    _LOGGER.info('Sent: %s', binascii.hexlify(data['frame']))
                    try:
                        if data['desired_states'] is not None:
                            # Set a timer to verify the state changes
                            # Wait 2 seconds as it can take a while for
                            # the state to change.
                            Timer(2.0, self._check_state, [data]).start()
                    except KeyError:
                        # Queued entries without 'desired_states' (plain
                        # key presses) need no verification.
                        pass
                continue
            elif frame_type == self.FRAME_TYPE_KEY_EVENT:
                _LOGGER.info('Key: %s', binascii.hexlify(frame))
            elif frame_type == self.FRAME_TYPE_LEDS:
                _LOGGER.debug('LEDs: %s', binascii.hexlify(frame))
                # First 4 bytes are the LEDs that are on;
                # second 4 bytes_ are the LEDs that are flashing
                states = int.from_bytes(frame[0:4], byteorder='little')
                flashing_states = int.from_bytes(frame[4:8],
                                                 byteorder='little')
                # A flashing LED is also reported as 'on'.
                states |= flashing_states
                if (states != self._states
                        or flashing_states != self._flashing_states):
                    self._states = states
                    self._flashing_states = flashing_states
                    data_changed_callback(self)
            elif frame_type == self.FRAME_TYPE_PUMP_SPEED_REQUEST:
                value = int.from_bytes(frame[0:2], byteorder='big')
                _LOGGER.debug('Pump speed request: %d%%', value)
                if self._pump_speed != value:
                    self._pump_speed = value
                    data_changed_callback(self)
            elif frame_type == self.FRAME_TYPE_PUMP_STATUS:
                # Pump status messages sent out by Hayward VSP pumps
                self._multi_speed_pump = True
                speed = frame[2]
                # Power is in BCD
                power = ((((frame[3] & 0xf0) >> 4) * 1000)
                         + (((frame[3] & 0x0f)) * 100)
                         + (((frame[4] & 0xf0) >> 4) * 10)
                         + (((frame[4] & 0x0f))))
                _LOGGER.debug('Pump speed: %d%%, power: %d watts',
                              speed, power)
                if self._pump_power != power:
                    self._pump_power = power
                    data_changed_callback(self)
            elif frame_type == self.FRAME_TYPE_DISPLAY_UPDATE:
                # The display text doubles as a data source; parse the
                # whitespace-separated words it contains.
                parts = frame.decode('latin-1').split()
                _LOGGER.debug('Display update: %s', parts)
                try:
                    if parts[0] == 'Pool' and parts[1] == 'Temp':
                        # Pool Temp <temp>°[C|F]
                        value = int(parts[2][:-2])
                        if self._pool_temp != value:
                            self._pool_temp = value
                            self._is_metric = parts[2][-1:] == 'C'
                            data_changed_callback(self)
                    elif parts[0] == 'Spa' and parts[1] == 'Temp':
                        # Spa Temp <temp>°[C|F]
                        value = int(parts[2][:-2])
                        if self._spa_temp != value:
                            self._spa_temp = value
                            self._is_metric = parts[2][-1:] == 'C'
                            data_changed_callback(self)
                    elif parts[0] == 'Air' and parts[1] == 'Temp':
                        # Air Temp <temp>°[C|F]
                        value = int(parts[2][:-2])
                        if self._air_temp != value:
                            self._air_temp = value
                            self._is_metric = parts[2][-1:] == 'C'
                            data_changed_callback(self)
                    elif parts[0] == 'Pool' and parts[1] == 'Chlorinator':
                        # Pool Chlorinator <value>%
                        value = int(parts[2][:-1])
                        if self._pool_chlorinator != value:
                            self._pool_chlorinator = value
                            data_changed_callback(self)
                    elif parts[0] == 'Spa' and parts[1] == 'Chlorinator':
                        # Spa Chlorinator <value>%
                        value = int(parts[2][:-1])
                        if self._spa_chlorinator != value:
                            self._spa_chlorinator = value
                            data_changed_callback(self)
                    elif parts[0] == 'Salt' and parts[1] == 'Level':
                        # Salt Level <value> [g/L|PPM|
                        value = float(parts[2])
                        if self._salt_level != value:
                            self._salt_level = value
                            self._is_metric = parts[3] == 'g/L'
                            data_changed_callback(self)
                    elif parts[0] == 'Check' and parts[1] == 'System':
                        # Check System <msg>
                        value = ' '.join(parts[2:])
                        if self._check_system_msg != value:
                            self._check_system_msg = value
                            data_changed_callback(self)
                except ValueError:
                    # Non-numeric display text (e.g. partial updates) is
                    # silently ignored.
                    pass
            else:
                _LOGGER.info('Unknown frame: %s %s',
                             binascii.hexlify(frame_type),
                             binascii.hexlify(frame))
Sends a key.
def send_key(self, key):
        """Sends a key."""
        _LOGGER.info('Queueing key %s', key)
        # Build the key-event frame now, but defer transmission until the
        # next keep-alive packet is seen to reduce bus collisions.
        self._send_queue.put({'frame': self._get_key_event_frame(key)})
Returns a set containing the enabled states.
def states(self):
        """Return a list of the currently enabled states.

        FILTER_LOW_SPEED is additionally reported when the FILTER LED is
        flashing.
        """
        enabled = [state for state in States
                   if (state.value & self._states) != 0]
        if (self._flashing_states & States.FILTER) != 0:
            enabled.append(States.FILTER_LOW_SPEED)
        return enabled
Returns True if the specified state is enabled.
def get_state(self, state):
        """Returns True if the specified state is enabled."""
        # A queued change request takes precedence over the last reported
        # state, so report the value we expect to settle on.
        for pending in list(self._send_queue.queue):
            for desired in pending['desired_states']:
                if desired['state'] == state:
                    return desired['enabled']
        if state == States.FILTER_LOW_SPEED:
            # Low speed is signalled by the FILTER LED flashing.
            return (States.FILTER.value & self._flashing_states) != 0
        return (state.value & self._states) != 0
Set the state.
def set_state(self, state, enable):
        """Set the state."""
        current = self.get_state(state)
        if current == enable:
            # Already in the requested state; nothing to do.
            return True
        desired_states = [{'state': state, 'enabled': not current}]
        if state == States.FILTER_LOW_SPEED:
            # Low speed only exists on multi-speed pumps.
            if not self._multi_speed_pump:
                return False
            # Send the FILTER key once.
            # If the pump is in high speed, it will switch to low speed.
            # If the pump is off the retry mechanism will send an
            # additional FILTER key to switch into low speed.
            # If the pump is in low speed then we pretend the pump is off;
            # the retry mechanism will send an additional FILTER key
            # to switch into high speed.
            key = Keys.FILTER
            desired_states.append({'state': States.FILTER, 'enabled': True})
        else:
            # Map the state onto its matching key, if one exists.
            try:
                key = Keys[state.name]
            except KeyError:
                # TODO: send the appropriate combination of keys
                # to enable the state
                return False
        # Queue the key-event frame to be sent immediately after the next
        # keep-alive packet, in an attempt to avoid bus collisions.
        self._send_queue.put({'frame': self._get_key_event_frame(key),
                              'desired_states': desired_states,
                              'retries': 10})
        return True
Decorates a function by tracing the begining and
end of the function execution, if doTrace global is True
def trace(function, *args, **kwargs):
    """Invoke *function* with the given arguments, printing entry and
    exit (including the result) when the module-level ``doTrace`` flag
    is True."""
    if doTrace:
        print ("> "+function.__name__, args, kwargs)
    result = function(*args, **kwargs)
    if doTrace:
        print ("< "+function.__name__, args, kwargs, "->", result)
    return result
url = QtCore.QUrl("http://maps.google.com/maps/geo/")
url.addQueryItem("q", location)
url.addQueryItem("output", "csv")
url.addQueryItem("sensor", "false")
def geocode(self, location) :
        """Resolve *location* via the Google Maps geocoding web service.

        Blocks by pumping the Qt event loop until the network reply
        finishes, then returns the parsed result from _parseResult().
        """
        url = QtCore.QUrl("http://maps.googleapis.com/maps/api/geocode/xml")
        url.addQueryItem("address", location)
        url.addQueryItem("sensor", "false")
        request = QtNetwork.QNetworkRequest(url)
        reply = self.get(request)
        # NOTE(review): busy-waiting on processEvents() works but a
        # QEventLoop bound to the reply's finished() signal would be
        # cleaner.
        while reply.isRunning() :
            QtGui.QApplication.processEvents()
        reply.deleteLater()
        # NOTE(review): this schedules the manager itself for deletion,
        # making the object single-use — confirm callers never reuse it.
        self.deleteLater()
        return self._parseResult(reply)
Force a correlation matrix on a set of statistically distributed objects.
This function works on objects in-place.
Parameters
----------
params : array
An array of of uv objects.
corrmat : 2d-array
The correlation matrix to be imposed
def correlate(params, corrmat):
    """
    Force a correlation matrix on a set of statistically distributed objects.
    This function works on objects in-place.

    Parameters
    ----------
    params : array
        An array of of uv objects.
    corrmat : 2d-array
        The correlation matrix to be imposed
    """
    # Every input must carry Monte-Carlo samples we can re-shuffle.
    assert all(
        isinstance(param, UncertainFunction) for param in params
    ), 'All inputs to "correlate" must be of type "UncertainFunction"'
    # One column of samples per variable.
    samples = np.vstack([param._mcpts for param in params]).T
    # Re-order the samples so they exhibit the requested correlations.
    correlated = induce_correlations(samples, corrmat)
    # Write the shuffled samples back onto each variable (in-place update).
    for param, column in zip(params, correlated.T):
        param._mcpts = column
Induce a set of correlations on a column-wise dataset
Parameters
----------
data : 2d-array
An m-by-n array where m is the number of samples and n is the
number of independent variables, each column of the array corresponding
to each variable
corrmat : 2d-array
An n-by-n array that defines the desired correlation coefficients
(between -1 and 1). Note: the matrix must be symmetric and
positive-definite in order to induce.
Returns
-------
new_data : 2d-array
An m-by-n array that has the desired correlations.
def induce_correlations(data, corrmat):
    """
    Induce a set of correlations on a column-wise dataset

    Parameters
    ----------
    data : 2d-array
        An m-by-n array where m is the number of samples and n is the
        number of independent variables, each column of the array corresponding
        to each variable
    corrmat : 2d-array
        An n-by-n array that defines the desired correlation coefficients
        (between -1 and 1). Note: the matrix must be symmetric and
        positive-definite in order to induce.

    Returns
    -------
    new_data : 2d-array
        An m-by-n array that has the desired correlations.
    """
    # NOTE(review): this looks like the Iman-Conover rank-correlation
    # procedure — confirm against the original reference before relying
    # on that name.
    # Create an rank-matrix
    data_rank = np.vstack([rankdata(datai) for datai in data.T]).T
    # Generate van der Waerden scores
    data_rank_score = data_rank / (data_rank.shape[0] + 1.0)
    data_rank_score = norm(0, 1).ppf(data_rank_score)
    # Calculate the lower triangular matrix of the Cholesky decomposition
    # of the desired correlation matrix
    p = chol(corrmat)
    # Calculate the current correlations
    t = np.corrcoef(data_rank_score, rowvar=0)
    # Calculate the lower triangular matrix of the Cholesky decomposition
    # of the current correlation matrix
    q = chol(t)
    # Calculate the re-correlation matrix
    s = np.dot(p, np.linalg.inv(q))
    # Calculate the re-sampled matrix
    new_data = np.dot(data_rank_score, s.T)
    # Create the new rank matrix
    new_data_rank = np.vstack([rankdata(datai) for datai in new_data.T]).T
    # Sort the original data according to new_data_rank
    # (reuses the actual sample values, only their ordering changes).
    for i in range(data.shape[1]):
        # np.unique on the concatenated ranks yields one consistent
        # ordering for both the old and new rank columns.
        vals, order = np.unique(
            np.hstack((data_rank[:, i], new_data_rank[:, i])), return_inverse=True
        )
        old_order = order[: new_data_rank.shape[0]]
        new_order = order[-new_data_rank.shape[0] :]
        tmp = data[np.argsort(old_order), i][new_order]
        data[:, i] = tmp[:]
    return data
Plots a scatterplot matrix of subplots.
Usage:
plotcorr(X)
plotcorr(..., plotargs=...) # e.g., 'r*', 'bo', etc.
plotcorr(..., full=...) # e.g., True or False
plotcorr(..., labels=...) # e.g., ['label1', 'label2', ...]
Each column of "X" is plotted against other columns, resulting in
a ncols by ncols grid of subplots with the diagonal subplots labeled
with "labels". "X" is an array of arrays (i.e., a 2d matrix), a 1d array
of MCERP.UncertainFunction/Variable objects, or a mixture of the two.
Additional keyword arguments are passed on to matplotlib's "plot" command.
Returns the matplotlib figure object containing the subplot grid.
def plotcorr(X, plotargs=None, full=True, labels=None):
    """
    Plots a scatterplot matrix of subplots.

    Usage:

       plotcorr(X)
       plotcorr(..., plotargs=...) # e.g., 'r*', 'bo', etc.
       plotcorr(..., full=...) # e.g., True or False
       plotcorr(..., labels=...) # e.g., ['label1', 'label2', ...]

    Each column of "X" is plotted against other columns, resulting in
    a ncols by ncols grid of subplots with the diagonal subplots labeled
    with "labels". "X" is an array of arrays (i.e., a 2d matrix), a 1d array
    of MCERP.UncertainFunction/Variable objects, or a mixture of the two.
    Additional keyword arguments are passed on to matplotlib's "plot" command.
    Returns the matplotlib figure object containing the subplot grid.
    """
    import matplotlib.pyplot as plt

    # Replace UncertainFunction objects by their raw Monte-Carlo samples.
    X = [Xi._mcpts if isinstance(Xi, UncertainFunction) else Xi for Xi in X]
    X = np.atleast_2d(X)
    numvars, numdata = X.shape
    fig, axes = plt.subplots(nrows=numvars, ncols=numvars, figsize=(8, 8))
    fig.subplots_adjust(hspace=0.0, wspace=0.0)

    # NOTE(review): Axes.is_first_col()/is_last_row() were removed in
    # Matplotlib 3.6 — confirm the pinned Matplotlib version supports them.
    for ax in axes.flat:
        # Hide all ticks and labels
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)

        # Set up ticks only on one side for the "edge" subplots...
        if full:
            if ax.is_first_col():
                ax.yaxis.set_ticks_position("left")
            if ax.is_last_col():
                ax.yaxis.set_ticks_position("right")
            if ax.is_first_row():
                ax.xaxis.set_ticks_position("top")
            if ax.is_last_row():
                ax.xaxis.set_ticks_position("bottom")
        else:
            if ax.is_first_row():
                ax.xaxis.set_ticks_position("top")
            if ax.is_last_col():
                ax.yaxis.set_ticks_position("right")

    # Label the diagonal subplots...
    if not labels:
        labels = ["x" + str(i) for i in range(numvars)]

    for i, label in enumerate(labels):
        axes[i, i].annotate(
            label, (0.5, 0.5), xycoords="axes fraction", ha="center", va="center"
        )

    # Plot the data
    for i, j in zip(*np.triu_indices_from(axes, k=1)):
        # In 'full' mode each pair is drawn both above and below the
        # diagonal; otherwise only the upper triangle is used.
        if full:
            idx = [(i, j), (j, i)]
        else:
            idx = [(i, j)]
        for x, y in idx:
            # FIX #1: this needed to be changed from ...(data[x], data[y],...)
            if plotargs is None:
                # Default marker: single pixels for large samples,
                # points for small ones.
                if len(X[x]) > 100:
                    plotargs = ",b"  # pixel marker
                else:
                    plotargs = ".b"  # point marker
            axes[x, y].plot(X[y], X[x], plotargs)
            ylim = min(X[y]), max(X[y])
            xlim = min(X[x]), max(X[x])
            # Pad the data range by 10% on each side.
            axes[x, y].set_ylim(
                xlim[0] - (xlim[1] - xlim[0]) * 0.1, xlim[1] + (xlim[1] - xlim[0]) * 0.1
            )
            axes[x, y].set_xlim(
                ylim[0] - (ylim[1] - ylim[0]) * 0.1, ylim[1] + (ylim[1] - ylim[0]) * 0.1
            )

    # Turn on the proper x or y axes ticks.
    if full:
        for i, j in zip(list(range(numvars)), itertools.cycle((-1, 0))):
            axes[j, i].xaxis.set_visible(True)
            axes[i, j].yaxis.set_visible(True)
    else:
        for i in range(numvars - 1):
            axes[0, i + 1].xaxis.set_visible(True)
            axes[i, -1].yaxis.set_visible(True)
        # Remove the unused lower-triangle axes entirely.
        for i in range(1, numvars):
            for j in range(0, i):
                fig.delaxes(axes[i, j])

    # FIX #2: if numvars is odd, the bottom right corner plot doesn't have the
    # correct axes limits, so we pull them from other axes
    if numvars % 2:
        xlimits = axes[0, -1].get_xlim()
        ylimits = axes[-1, 0].get_ylim()
        axes[-1, -1].set_xlim(xlimits)
        axes[-1, -1].set_ylim(ylimits)

    return fig
Calculate the lower triangular matrix of the Cholesky decomposition of
a symmetric, positive-definite matrix.
def chol(A):
    """
    Calculate the lower triangular matrix of the Cholesky decomposition of
    a symmetric, positive-definite matrix.
    """
    A = np.array(A)
    n = A.shape[0]
    assert n == A.shape[1], "Input matrix must be square"
    # Accumulate the factor row by row; only the lower triangle (j <= i)
    # is ever populated.
    L = [[0.0] * n for _ in range(n)]
    for i in range(n):
        for j in range(i + 1):
            partial = sum(L[i][k] * L[j][k] for k in range(j))
            if i == j:
                # Diagonal entries come from the square root.
                L[i][j] = (A[i][i] - partial) ** 0.5
            else:
                # Off-diagonal entries are scaled by the diagonal pivot.
                L[i][j] = 1.0 / L[j][j] * (A[i][j] - partial)
    return np.array(L)
A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
def get(self, uri, params=None):
        '''A generic method to make GET requests to the OpenDNS Investigate API
        on the given URI.
        '''
        # A None default avoids sharing one mutable dict across calls.
        if params is None:
            params = {}
        return self._session.get(urljoin(Investigate.BASE_URL, uri),
            params=params, headers=self._auth_header, proxies=self.proxies
        )
A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
def post(self, uri, params=None, data=None):
        '''A generic method to make POST requests to the OpenDNS Investigate API
        on the given URI.
        '''
        # None defaults avoid sharing mutable dicts across calls.
        if params is None:
            params = {}
        if data is None:
            data = {}
        return self._session.post(
            urljoin(Investigate.BASE_URL, uri),
            params=params, data=data, headers=self._auth_header,
            proxies=self.proxies
        )
Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
def get_parse(self, uri, params=None):
        '''Convenience method to call get() on an arbitrary URI and parse the response
        into a JSON object. Raises an error on non-200 response status.
        '''
        # A None default avoids sharing one mutable dict across calls.
        if params is None:
            params = {}
        return self._request_parse(self.get, uri, params)
Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
def post_parse(self, uri, params=None, data=None):
        '''Convenience method to call post() on an arbitrary URI and parse the response
        into a JSON object. Raises an error on non-200 response status.
        '''
        # None defaults avoid sharing mutable dicts across calls.
        if params is None:
            params = {}
        if data is None:
            data = {}
        return self._request_parse(self.post, uri, params, data)
Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
def categorization(self, domains, labels=False):
        '''Get the domain status and categorization of a domain or list of domains.
        'domains' can be either a single domain, or a list of domains.
        Setting 'labels' to True will give back categorizations in human-readable
        form.

        For more detail, see https://investigate.umbrella.com/docs/api#categorization
        '''
        # isinstance handles str subclasses, unlike the former
        # 'type(x) is str' identity check.
        if isinstance(domains, str):
            return self._get_categorization(domains, labels)
        # Accept tuples as well as lists for the bulk POST endpoint.
        elif isinstance(domains, (list, tuple)):
            return self._post_categorization(domains, labels)
        else:
            raise Investigate.DOMAIN_ERR
Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
def cooccurrences(self, domain):
        '''Get the cooccurrences of the given domain.

        For details, see https://investigate.umbrella.com/docs/api#co-occurrences
        '''
        # Fill the domain into the endpoint template and fetch/parse it.
        return self.get_parse(self._uris["cooccurrences"].format(domain))
Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
def related(self, domain):
        '''Get the related domains of the given domain.

        For details, see https://investigate.umbrella.com/docs/api#relatedDomains
        '''
        # Fill the domain into the endpoint template and fetch/parse it.
        return self.get_parse(self._uris["related"].format(domain))
Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
def security(self, domain):
        '''Get the Security Information for the given domain.

        For details, see https://investigate.umbrella.com/docs/api#securityInfo
        '''
        # Fill the domain into the endpoint template and fetch/parse it.
        return self.get_parse(self._uris["security"].format(domain))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.