| docstring (string, 52 to 499 chars) | function (string, 67 to 35.2k chars) | __index_level_0__ (int64, 52.6k to 1.16M) |
|---|---|---|
Extract tracers data.
Args:
tracersfile (:class:`pathlib.Path`): path of the binary tracers file.
Returns:
dict of list of numpy.array:
Tracers data organized by attribute and block.
|
def tracers(tracersfile):
if not tracersfile.is_file():
return None
tra = {}
with tracersfile.open('rb') as fid:
readbin = partial(_readbin, fid)
magic = readbin()
if magic > 8000: # 64 bits
magic -= 8000
readbin()
readbin = partial(readbin, file64=True)
if magic < 100:
raise ParsingError(tracersfile,
'magic > 100 expected to get tracervar info')
nblk = magic % 100
readbin('f', 2) # aspect ratio
readbin() # istep
readbin('f') # time
ninfo = readbin()
ntra = readbin(nwords=nblk, unpack=False)
readbin('f') # tracer ideal mass
curv = readbin()
if curv:
readbin('f') # r_cmb
infos = [] # list of info names
for _ in range(ninfo):
infos.append(b''.join(readbin('b', 16)).strip().decode())
tra[infos[-1]] = []
if magic > 200:
ntrace_elt = readbin()
if ntrace_elt > 0:
readbin('f', ntrace_elt) # outgassed
for ntrab in ntra: # blocks
data = readbin('f', ntrab * ninfo)
for idx, info in enumerate(infos):
tra[info].append(data[idx::ninfo])
return tra
| 680,217
|
Return group content.
Args:
filename (:class:`pathlib.Path`): path of hdf5 file.
groupname (str): name of group to read.
Returns:
:class:`numpy.array`: content of group.
|
def _read_group_h5(filename, groupname):
with h5py.File(filename, 'r') as h5f:
data = h5f[groupname][()]
return data
| 680,218
|
Add a dimension to field if necessary.
Args:
field (numpy.array): the field that needs to be 3d.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
Returns:
numpy.array: reshaped field.
|
def _make_3d(field, twod):
shp = list(field.shape)
if twod and 'X' in twod:
shp.insert(1, 1)
elif twod:
shp.insert(0, 1)
return field.reshape(shp)
| 680,219
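A minimal usage sketch of `_make_3d` (the shapes and `twod` values below are illustrative), assuming numpy and the function above are in scope:

```python
import numpy as np

field_xz = np.zeros((64, 32))          # an XZ plane: (n_x, n_z)
print(_make_3d(field_xz, 'XZ').shape)  # (64, 1, 32) -- Y axis inserted at position 1

field_yz = np.zeros((48, 32))          # a YZ plane: (n_y, n_z)
print(_make_3d(field_yz, 'YZ').shape)  # (1, 48, 32) -- X axis inserted at position 0

field_3d = np.zeros((64, 48, 32))      # already 3D, returned with the same shape
print(_make_3d(field_3d, None).shape)  # (64, 48, 32)
```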
|
Read all coord hdf5 files of a snapshot.
Args:
files (list of pathlib.Path): list of NodeCoordinates files of
a snapshot.
shapes (list of (int,int)): shape of mesh grids.
header (dict): geometry info.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
|
def _read_coord_h5(files, shapes, header, twod):
meshes = []
for h5file, shape in zip(files, shapes):
meshes.append({})
with h5py.File(h5file, 'r') as h5f:
for coord, mesh in h5f.items():
# for some reason, the array is transposed!
meshes[-1][coord] = mesh[()].reshape(shape).T
meshes[-1][coord] = _make_3d(meshes[-1][coord], twod)
header['ncs'] = _ncores(meshes, twod)
header['nts'] = list((meshes[0]['X'].shape[i] - 1) * header['ncs'][i]
for i in range(3))
header['nts'] = np.array([max(1, val) for val in header['nts']])
# meshes could also be defined in legacy parser, so that these can be used
# in geometry setup
meshes = _conglomerate_meshes(meshes, header)
if np.any(meshes['Z'][:, :, 0] != 0):
# spherical
header['x_mesh'] = np.copy(meshes['Y']) # annulus geometry...
header['y_mesh'] = np.copy(meshes['Z'])
header['z_mesh'] = np.copy(meshes['X'])
header['r_mesh'] = np.sqrt(header['x_mesh']**2 + header['y_mesh']**2 +
header['z_mesh']**2)
header['t_mesh'] = np.arccos(header['z_mesh'] / header['r_mesh'])
header['p_mesh'] = np.roll(
np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
header['e1_coord'] = header['t_mesh'][:, 0, 0]
header['e2_coord'] = header['p_mesh'][0, :, 0]
header['e3_coord'] = header['r_mesh'][0, 0, :]
else:
header['e1_coord'] = meshes['X'][:, 0, 0]
header['e2_coord'] = meshes['Y'][0, :, 0]
header['e3_coord'] = meshes['Z'][0, 0, :]
header['aspect'] = (header['e1_coord'][-1] - header['e2_coord'][0],
header['e1_coord'][-1] - header['e2_coord'][0])
header['rcmb'] = header['e3_coord'][0]
if header['rcmb'] == 0:
header['rcmb'] = -1
else:
# could make the difference between r_coord and z_coord
header['e3_coord'] = header['e3_coord'] - header['rcmb']
if twod is None or 'X' in twod:
header['e1_coord'] = header['e1_coord'][:-1]
if twod is None or 'Y' in twod:
header['e2_coord'] = header['e2_coord'][:-1]
header['e3_coord'] = header['e3_coord'][:-1]
| 680,222
|
Extract geometry information from hdf5 files.
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
snapshot (int): snapshot number.
Returns:
(dict, root): geometry information and root of xdmf document.
|
def read_geom_h5(xdmf_file, snapshot):
header = {}
xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
if snapshot is None:
return None, xdmf_root
# Domain, Temporal Collection, Snapshot
# should check that this is indeed the required snapshot
elt_snap = xdmf_root[0][0][snapshot]
header['ti_ad'] = float(elt_snap.find('Time').get('Value'))
header['mo_lambda'] = _maybe_get(elt_snap, 'mo_lambda', 'Value', float)
header['mo_thick_sol'] = _maybe_get(elt_snap, 'mo_thick_sol', 'Value',
float)
header['ntb'] = 1
coord_h5 = [] # all the coordinate files
coord_shape = [] # shape of meshes
twod = None
for elt_subdomain in elt_snap.findall('Grid'):
if elt_subdomain.get('Name').startswith('meshYang'):
header['ntb'] = 2
break # iterate only through meshYin
elt_geom = elt_subdomain.find('Geometry')
if elt_geom.get('Type') == 'X_Y' and twod is None:
twod = ''
for data_item in elt_geom.findall('DataItem'):
coord = data_item.text.strip()[-1]
if coord in 'XYZ':
twod += coord
data_item = elt_geom.find('DataItem')
coord_shape.append(_get_dim(data_item))
coord_h5.append(
xdmf_file.parent / data_item.text.strip().split(':/', 1)[0])
_read_coord_h5(coord_h5, coord_shape, header, twod)
return header, xdmf_root
| 680,225
|
Extract field data from hdf5 files.
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
fieldname (str): name of field to extract.
snapshot (int): snapshot number.
header (dict): geometry information.
Returns:
(dict, numpy.array): geometry information and field data. None
is returned if data is unavailable.
|
def read_field_h5(xdmf_file, fieldname, snapshot, header=None):
if header is None:
header, xdmf_root = read_geom_h5(xdmf_file, snapshot)
else:
xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
npc = header['nts'] // header['ncs'] # number of grid point per node
flds = np.zeros(_flds_shape(fieldname, header))
data_found = False
for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
for data_attr in elt_subdomain.findall('Attribute'):
if data_attr.get('Name') != fieldname:
continue
icore, fld = _get_field(xdmf_file, data_attr.find('DataItem'))
# for some reason, the field is transposed
fld = fld.T
shp = fld.shape
if shp[-1] == 1 and header['nts'][0] == 1: # YZ
fld = fld.reshape((shp[0], 1, shp[1], shp[2]))
if header['rcmb'] < 0:
fld = fld[(2, 0, 1), ...]
elif shp[-1] == 1: # XZ
fld = fld.reshape((shp[0], shp[1], 1, shp[2]))
if header['rcmb'] < 0:
fld = fld[(0, 2, 1), ...]
elif header['nts'][1] == 1: # cart XZ
fld = fld.reshape((1, shp[0], 1, shp[1]))
ifs = [icore // np.prod(header['ncs'][:i]) % header['ncs'][i] *
npc[i] for i in range(3)]
if header['zp']: # remove top row
fld = fld[:, :, :, :-1]
flds[:,
ifs[0]:ifs[0] + npc[0] + header['xp'],
ifs[1]:ifs[1] + npc[1] + header['yp'],
ifs[2]:ifs[2] + npc[2],
ibk] = fld
data_found = True
flds = _post_read_flds(flds, header)
return (header, flds) if data_found else None
| 680,229
|
Extract tracers data from hdf5 files.
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
infoname (str): name of information to extract.
snapshot (int): snapshot number.
position (bool): whether to extract position of tracers.
Returns:
dict of list of numpy.array:
Tracers data organized by attribute and block.
|
def read_tracers_h5(xdmf_file, infoname, snapshot, position):
xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
tra = {}
tra[infoname] = [{}, {}] # two blocks, ordered by cores
if position:
for axis in 'xyz':
tra[axis] = [{}, {}]
for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
if position:
for data_attr in elt_subdomain.findall('Geometry'):
for data_item, axis in zip(data_attr.findall('DataItem'),
'xyz'):
icore, data = _get_field(xdmf_file, data_item)
tra[axis][ibk][icore] = data
for data_attr in elt_subdomain.findall('Attribute'):
if data_attr.get('Name') != infoname:
continue
icore, data = _get_field(xdmf_file, data_attr.find('DataItem'))
tra[infoname][ibk][icore] = data
for info in tra:
tra[info] = [trab for trab in tra[info] if trab] # remove empty blocks
for iblk, trab in enumerate(tra[info]):
tra[info][iblk] = np.concatenate([trab[icore]
for icore in range(len(trab))])
return tra
| 680,230
|
Iterate through (isnap, istep) recorded in h5folder/'time_botT.h5'.
Args:
h5folder (:class:`pathlib.Path`): directory of HDF5 output files.
Yields:
tuple of int: (isnap, istep).
|
def read_time_h5(h5folder):
with h5py.File(h5folder / 'time_botT.h5', 'r') as h5f:
for name, dset in h5f.items():
yield int(name[-5:]), int(dset[2])
| 680,231
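A short usage sketch of `read_time_h5`, assuming a hypothetical output directory containing `time_botT.h5`:

```python
from pathlib import Path

# iterate over the recorded (isnap, istep) pairs; 'output_h5' is a hypothetical folder
for isnap, istep in read_time_h5(Path('output_h5')):
    print('snapshot {} written at step {}'.format(isnap, istep))
```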
|
Get docker build args dict, rendering any templated args.
Args:
options (dict):
The dictionary for a given image from chartpress.yaml.
Fields in `options['buildArgs']` will be rendered and returned,
if defined.
ns (dict): the namespace used when rendering templated arguments
|
def render_build_args(options, ns):
build_args = options.get('buildArgs', {})
for key, value in build_args.items():
build_args[key] = value.format(**ns)
return build_args
| 680,784
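A sketch of how templated build args are rendered against the namespace; the keys and values below are illustrative, not real chartpress options:

```python
# options as they might appear for one image in chartpress.yaml
options = {'buildArgs': {'BASE_IMAGE': 'ubuntu:{UBUNTU_VERSION}',
                         'COMMIT': '{LAST_COMMIT}'}}
ns = {'UBUNTU_VERSION': '20.04', 'LAST_COMMIT': 'abc1234'}
print(render_build_args(options, ns))
# {'BASE_IMAGE': 'ubuntu:20.04', 'COMMIT': 'abc1234'}
```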
|
Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`.
|
def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
cmd = ['docker', 'build', '-t', image_name, image_path]
if dockerfile_path:
cmd.extend(['-f', dockerfile_path])
for k, v in (build_args or {}).items():
cmd += ['--build-arg', '{}={}'.format(k, v)]
check_call(cmd)
| 680,785
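An illustrative call and the docker command it assembles (the paths and names are hypothetical):

```python
# runs via check_call:
#   docker build -t myorg/myimage:0.1 ./images/myimage \
#       -f Dockerfile.alt --build-arg BASE_IMAGE=ubuntu:20.04
build_image('./images/myimage', 'myorg/myimage:0.1',
            build_args={'BASE_IMAGE': 'ubuntu:20.04'},
            dockerfile_path='Dockerfile.alt')
```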
|
Return whether an image needs pushing
Args:
image (str): the `repository:tag` image to be built.
Returns:
True: if image needs to be pushed (not on registry)
False: if not (already present on registry)
|
def image_needs_pushing(image):
d = docker_client()
try:
d.images.get_registry_data(image)
except docker.errors.APIError:
# image not found on registry, needs pushing
return True
else:
return False
| 680,786
|
Return whether an image needs building
Checks if the image exists (ignores commit range),
either locally or on the registry.
Args:
image (str): the `repository:tag` image to be built.
Returns:
True: if image needs to be built
False: if not (image already exists)
|
def image_needs_building(image):
d = docker_client()
# first, check for locally built image
try:
d.images.get(image)
except docker.errors.ImageNotFound:
# image not found, check registry
pass
else:
# it exists locally, no need to check remote
return False
# image may need building if it's not on the registry
return image_needs_pushing(image)
| 680,787
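A sketch of how the two checks compose in a build-then-push flow (the image name and path are hypothetical; `check_call` and `build_image` as above):

```python
image = 'myorg/myimage:0.1'
if image_needs_building(image):
    build_image('./images/myimage', image)
if image_needs_pushing(image):
    check_call(['docker', 'push', image])
```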
|
Adds values to the specified bundle. Checks whether each value is already in the bundle; if so, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values that should be added to the bundle.
Raises:
YouTrackException: if something is wrong with queries.
|
def add_values_to_bundle_safe(connection, bundle, values):
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e
| 680,846
|
Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid.
|
def validate(self, fixerrors=True):
# validate nullgeometry or has type and coordinates keys
if not self._data:
# null geometry, no further checking needed
return True
elif "type" not in self._data or "coordinates" not in self._data:
raise Exception("A geometry dictionary or instance must have the type and coordinates entries")
# first validate geometry type
if not self.type in ("Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"):
if fixerrors:
coretype = self.type.lower().replace("multi","")
if coretype == "point":
newtype = "Point"
elif coretype == "linestring":
newtype = "LineString"
elif coretype == "polygon":
newtype = "Polygon"
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
if self.type.lower().startswith("multi"):
newtype = "Multi" + newtype
self.type = newtype
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
# then validate coordinate data type
coords = self._data["coordinates"]
if not isinstance(coords, (list,tuple)): raise Exception("Coordinates must be a list or tuple type")
# then validate coordinate structures
if self.type == "Point":
if not len(coords) == 2: raise Exception("Point must be one coordinate pair")
elif self.type in ("MultiPoint","LineString"):
if not len(coords) > 1: raise Exception("MultiPoint and LineString must have more than one coordinates")
elif self.type == "MultiLineString":
for line in coords:
if not len(line) > 1: raise Exception("All LineStrings in a MultiLineString must have more than one coordinate")
elif self.type == "Polygon":
for exterior_or_holes in coords:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in a Polygon must have at least 3 coordinates")
elif self.type == "MultiPolygon":
for eachmulti in coords:
for exterior_or_holes in eachmulti:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates")
# validation successful
return True
| 681,222
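A sketch of the `fixerrors` path, assuming the surrounding Geometry class accepts a GeoJSON-style dict; the coordinates are illustrative:

```python
# a geometry dict with a badly cased type; fixerrors=True normalizes it
geom = Geometry({'type': 'multipolygon',
                 'coordinates': [[[(0, 0), (0, 1), (1, 1), (0, 0)]]]})
geom.validate()                  # rewrites the type to 'MultiPolygon' and returns True
geom.validate(fixerrors=False)   # now also passes, since the type was fixed above
```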
|
If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs.
|
def __init__(self, obj=None, geometry=None, properties=None):
properties = properties or {}
if isinstance(obj, Feature):
# from scratch as copy of another feat instance
self._data = {"type":"Feature",
"geometry":Geometry(obj.geometry).__geo_interface__,
"properties":obj.properties.copy() }
elif isinstance(obj, dict):
# comes straight from geojfile _iter_, so must use original dict
# Note: user should not specify directly as dict, since won't validate, any better way?
self._data = obj
else:
# from scratch from geometry/properties
self._data = {"type":"Feature",
"geometry":Geometry(geometry).__geo_interface__,
"properties":properties.copy() }
| 681,223
|
Validates that the feature is correctly formatted.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the feature is valid.
Raises:
- An Exception if not valid.
|
def validate(self, fixerrors=True):
if not "type" in self._data or self._data["type"] != "Feature":
if fixerrors:
self._data["type"] = "Feature"
else:
raise Exception("A geojson feature dictionary must contain a type key and it must be named 'Feature'.")
if not "geometry" in self._data:
if fixerrors:
self.geometry = Geometry() # nullgeometry
else:
raise Exception("A geojson feature dictionary must contain a geometry key.")
if not "properties" in self._data or not isinstance(self.properties,dict):
if fixerrors:
self._data["properties"] = dict()
else:
raise Exception("A geojson feature dictionary must contain a properties key and it must be a dictionary type.")
self.geometry.validate(fixerrors)
return True
| 681,226
|
Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs.
|
def add_feature(self, obj=None, geometry=None, properties=None):
properties = properties or {}
if isinstance(obj, Feature):
# instead of creating copy, the original feat should reference the same one that was added here
feat = obj._data
elif isinstance(obj, dict):
feat = obj.copy()
else:
feat = Feature(geometry=geometry, properties=properties).__geo_interface__
self._data["features"].append(feat)
| 681,230
|
Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.
Parameters:
- **savepath**: Filepath to save the file.
|
def save(self, savepath, **kwargs):
self.update_bbox()
tempfile = open(savepath,"w")
json.dump(self._data, tempfile, **kwargs)
tempfile.close()
| 681,235
|
Get a logging handle.
As with ``setup_logging``, a stream handler is added to the
log handle.
Arguments:
name (str): Name of the log handle. Default is ``None``.
|
def get_logger(name=None):
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
logger = add_stream_handler(logger)
return logger
| 681,479
|
Lists files in a given directory.
Args:
d (str): Path to a directory.
extension (str): If supplied, only files that contain the
specified extension will be returned. Default is ``None``,
which returns all files in ``d``.
Returns:
list: A sorted list of file paths.
|
def list_files(d, extension=None):
if os.path.isdir(d):
expanded_dir = os.path.expanduser(d)
files = sorted(glob.glob(expanded_dir + '/*'))
else:
files = [d, ]
if extension is not None:
if type(extension) in STR_TYPES:
extension = [extension, ]
files = [f for f in files if any([f.split('.')[-1] in extension,
f.split('.')[-1].upper() in extension,
f.split('.')[-1].lower() in extension])]
return files
| 681,587
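A couple of illustrative calls (the paths are hypothetical); note that the extension match is effectively case-insensitive:

```python
# all files in a directory whose extension matches 'fasta'
fastas = list_files('/data/sequences', extension='fasta')

# a single file path is returned as a one-element list
single = list_files('/data/reads.fastq')
```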
|
Converts ABI or AB1 files to FASTA format.
Args:
input (str): Path to a file or directory containing abi/ab1 files or
zip archives of abi/ab1 files
output (str): Path to a directory for the output FASTA files
|
def abi_to_fasta(input, output):
direcs = [input, ]
# unzip any zip archives
zip_files = list_files(input, ['zip'])
if zip_files:
direcs.extend(_process_zip_files(zip_files))
# convert files
for d in direcs:
files = list_files(d, ['ab1', 'abi'])
seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]
# seqs = list(chain.from_iterable(seqs))
fastas = ['>{}\n{}'.format(s.id, str(s.seq)) for s in seqs]
ofile = os.path.basename(os.path.normpath(d)) + '.fasta'
opath = os.path.join(output, ofile)
open(opath, 'w').write('\n'.join(fastas))
| 681,642
|
Uploads a single file to S3, using s3cmd.
Args:
f (str): Path to a single file.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``f``. For example::
put(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz``
|
def put(f, s3_path, multipart_chunk_size_mb=500, logger=None):
if not logger:
logger = log.get_logger('s3')
fname = os.path.basename(f)
target = os.path.join(s3_path, fname)
s3cmd_cline = 's3cmd put {} {} --multipart-chunk-size-mb {}'.format(f,
target,
multipart_chunk_size_mb)
print_put_info(fname, target, logger)
s3cmd = sp.Popen(s3cmd_cline,
stdout=sp.PIPE,
stderr=sp.PIPE,
shell=True)
stdout, stderr = s3cmd.communicate()
| 681,683
|
Creates a compressed/uncompressed tar file.
Args:
d: Can be one of three things:
1. the path to a single file, as a string
2. the path to a single directory, as a string
3. an iterable of file or directory paths
output (str): Output file path.
fmt: Compression method. Options are ``'gz'`` (gzip),
``'bz2'`` (bzip2) and ``'none'`` (uncompressed). Default is ``'gz'``.
|
def compress(d, output, fmt='gz', logger=None):
if not logger:
logger = log.get_logger('s3')
if type(d) not in [list, tuple]:
d = [d, ]
d = [os.path.expanduser(_d) for _d in d]
print_compress_info(d, output, compress, logger)
if fmt.lower() == 'none':
fmt = ''
elif fmt.lower() not in ['gz', 'bz2']:
logger.info('Compression option ("{}") is invalid.\nFalling back to uncompressed.'.format(fmt))
fmt = ''
output = os.path.expanduser(output)
tar = tarfile.open(output, 'w:{}'.format(fmt))
for obj in d:
tar.add(obj)
tar.close()
return output
| 681,685
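A short usage sketch (the paths are hypothetical):

```python
# bundle two result directories into a bzip2-compressed tarball
archive = compress(['/data/run1', '/data/run2'],
                   '~/archives/results.tar.bz2', fmt='bz2')
```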
|
Configures s3cmd prior to first use.
If no arguments are provided, you will be prompted to enter
the access key and secret key interactively.
Args:
access_key (str): AWS access key
secret_key (str): AWS secret key
|
def configure(access_key=None, secret_key=None, logger=None):
if not logger:
logger = log.get_logger('s3')
if not all([access_key, secret_key]):
logger.info('')
access_key = input('AWS Access Key: ')
secret_key = input('AWS Secret Key: ')
_write_config(access_key, secret_key)
logger.info('')
logger.info('Completed writing S3 config file.')
logger.info('')
| 681,687
|
Updates MongoDB documents.
Sets ``field`` equal to ``value`` for all documents that
meet ``match`` criteria.
Arguments:
field (str): Field to update.
value (str): Update value.
db (Database): A pymongo Database object.
collection (str): Collection name.
match (dict): A dictionary containing the match criteria, for example::
{'seq_id': {'$in': ['a', 'b', 'c']}, 'cdr3_len': {'$gte': 18}}
|
def update(field, value, db, collection, match=None):
c = db[collection]
match = match if match is not None else {}
# check MongoDB version to use appropriate update command
if db.client.server_info()['version'].startswith('2'):
c.update(match, {'$set': {field: value}}, multi=True)
else:
c.update_many(match, {'$set': {field: value}})
| 681,705
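An illustrative call, assuming a pymongo client is available (the database, collection, and field names are hypothetical):

```python
import pymongo

db = pymongo.MongoClient()['my_database']
# mark every document with a long CDR3 as reviewed
update('reviewed', True, db, 'sequences',
       match={'cdr3_len': {'$gte': 18}})
```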
|
Returns a region of ``Sequence.sequence``, in FASTA format.
If called without kwargs, the entire sequence will be returned.
Args:
start (int): Start position of the region to be returned. Default
is 0.
end (int): End position of the region to be returned. Negative values
will function as they do when slicing strings.
Returns:
str: A region of ``Sequence.sequence``, in FASTA format
|
def region(self, start=0, end=None):
if end is None:
end = len(self.sequence)
return '>{}\n{}'.format(self.id, self.sequence[start:end])
| 681,753
|
Generates a matplotlib colormap from a single color.
Colormap will be built, by default, from white to ``color``.
Args:
color: Can be one of several things:
1. Hex code
2. HTML color name
3. RGB tuple
dark (bool): If ``True``, colormap will be built from ``color`` to
black. Default is ``False``, which builds a colormap from
white to ``color``.
Returns:
colormap: A matplotlib colormap
|
def cmap_from_color(color, dark=False):
if dark:
return sns.dark_palette(color, as_cmap=True)
else:
return sns.light_palette(color, as_cmap=True)
| 681,839
|
Stacks two colormaps (``lower`` and ``upper``) such that
low half -> ``lower`` colors, high half -> ``upper`` colors
Args:
lower (colormap): colormap for the lower half of the stacked colormap.
upper (colormap): colormap for the upper half of the stacked colormap.
n (int): Number of colormap steps. Default is ``256``.
|
def stack_colormap(lower, upper, n=256):
A = get_cmap(lower)
B = get_cmap(upper)
name = "%s-%s" % (A.name, B.name)
lin = np.linspace(0, 1, n)
return array_cmap(np.vstack((A(lin), B(lin))), name, n=n)
| 681,845
|
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
UnresolvedIdError if id couldn't be resolved
|
def resolve_json_id(self, json_id, allow_no_match=False):
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
| 681,904
|
Splits tuple received from PacketHandler into packet UID and packet message.
Decodes packet and inserts into database backend.
Logs any exceptions raised.
Params:
input_data: message received from inbound stream through PacketHandler
topic: name of inbound stream message received from
**kwargs: any args required for connected to the backend
|
def process(self, input_data, topic=None, **kwargs):
try:
split = input_data[1:-1].split(',', 1)
uid, pkt = int(split[0]), split[1]
defn = self.packet_dict[uid]
decoded = tlm.Packet(defn, data=bytearray(pkt))
self.dbconn.insert(decoded, **kwargs)
except Exception as e:
log.error('Data archival failed with error: {}.'.format(e))
| 682,640
|
Creates an inbound stream from its config.
Params:
config: stream configuration as read by ait.config
Returns:
stream: a Stream
Raises:
ValueError: if any of the required config values are missing
|
def _create_inbound_stream(self, config=None):
if config is None:
raise ValueError('No stream config to create stream from.')
name = self._get_stream_name(config)
stream_handlers = self._get_stream_handlers(config, name)
stream_input = config.get('input', None)
if stream_input is None:
raise(cfg.AitConfigMissing('inbound stream {}\'s input'.format(name)))
if type(stream_input[0]) is int:
return PortInputStream(name,
stream_input,
stream_handlers,
zmq_args={'zmq_context': self.broker.context,
'zmq_proxy_xsub_url': self.broker.XSUB_URL,
'zmq_proxy_xpub_url': self.broker.XPUB_URL})
else:
return ZMQStream(name,
stream_input,
stream_handlers,
zmq_args={'zmq_context': self.broker.context,
'zmq_proxy_xsub_url': self.broker.XSUB_URL,
'zmq_proxy_xpub_url': self.broker.XPUB_URL})
| 682,675
|
Creates an outbound stream from its config.
Params:
config: stream configuration as read by ait.config
Returns:
stream: a Stream
Raises:
ValueError: if any of the required config values are missing
|
def _create_outbound_stream(self, config=None):
if config is None:
raise ValueError('No stream config to create stream from.')
name = self._get_stream_name(config)
stream_handlers = self._get_stream_handlers(config, name)
stream_input = config.get('input', None)
stream_output = config.get('output', None)
if type(stream_output) is int:
return PortOutputStream(name,
stream_input,
stream_output,
stream_handlers,
zmq_args={'zmq_context': self.broker.context,
'zmq_proxy_xsub_url': self.broker.XSUB_URL,
'zmq_proxy_xpub_url': self.broker.XPUB_URL})
else:
if stream_output is not None:
log.warn("Output of stream {} is not an integer port. "
"Stream outputs can only be ports.".format(name))
return ZMQStream(name,
stream_input,
stream_handlers,
zmq_args={'zmq_context': self.broker.context,
'zmq_proxy_xsub_url': self.broker.XSUB_URL,
'zmq_proxy_xpub_url': self.broker.XPUB_URL})
| 682,676
|
Creates a handler from its config.
Params:
config: handler config
Returns:
handler instance
|
def _create_handler(self, config):
if config is None:
raise ValueError('No handler config to create handler from.')
if 'name' not in config:
raise ValueError('Handler name is required.')
handler_name = config['name']
# try to create handler
module_name = handler_name.rsplit('.', 1)[0]
class_name = handler_name.rsplit('.', 1)[-1]
module = import_module(module_name)
handler_class = getattr(module, class_name)
instance = handler_class(**config)
return instance
| 682,677
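A sketch of the handler config shape this expects; the module path and extra key below are hypothetical. Note that the full config dict, including 'name', is forwarded to the handler's constructor:

```python
config = {'name': 'my_project.handlers.CsvHandler',  # dotted path to the handler class
          'delimiter': ','}                          # extra keys become constructor kwargs
# handler = self._create_handler(config)  # imports my_project.handlers, instantiates CsvHandler
```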
|
Creates a plugin from its config.
Params:
config: plugin configuration as read by ait.config
Returns:
plugin: a Plugin
Raises:
ValueError: if any of the required config values are missing
|
def _create_plugin(self, config):
if config is None:
raise ValueError('No plugin config to create plugin from.')
name = config.pop('name', None)
if name is None:
raise(cfg.AitConfigMissing('plugin name'))
# TODO I don't think we actually care about this being unique? Left over from
# previous conversations about stuff?
module_name = name.rsplit('.', 1)[0]
class_name = name.rsplit('.', 1)[-1]
if class_name in [x.name for x in (self.outbound_streams +
self.inbound_streams +
self.servers +
self.plugins)]:
raise ValueError(
'Plugin "{}" already loaded. Only one plugin of a given name is allowed'.
format(class_name)
)
plugin_inputs = config.pop('inputs', None)
if plugin_inputs is None:
log.warn('No plugin inputs specified for {}'.format(name))
plugin_inputs = [ ]
subscribers = config.pop('outputs', None)
if subscribers is None:
log.warn('No plugin outputs specified for {}'.format(name))
subscribers = [ ]
# try to create plugin
module = import_module(module_name)
plugin_class = getattr(module, class_name)
instance = plugin_class(plugin_inputs,
subscribers,
zmq_args={'zmq_context': self.broker.context,
'zmq_proxy_xsub_url': self.broker.XSUB_URL,
'zmq_proxy_xpub_url': self.broker.XPUB_URL},
**config
)
return instance
| 682,679
|
Loops through ait.config._datapaths from AIT_CONFIG and creates each directory.
Replaces year and doy with the respective year and day-of-year.
If neither are given as arguments, current UTC day and year are used.
Args:
paths:
dict of directory paths (or lists of paths) you would like to create.
doy and year will be replaced by the datetime day and year, respectively.
datetime:
UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ
|
def createDirStruct(paths, verbose=True):
for k, path in paths.items():
p = None
try:
pathlist = path if type(path) is list else [ path ]
for p in pathlist:
os.makedirs(p)
if verbose:
log.info('Creating directory: ' + p)
except OSError, e:
#print path
if e.errno == errno.EEXIST and os.path.isdir(p):
pass
else:
raise
return True
| 682,721
|
Add an additional handler
Args:
handler:
A dictionary of handler configuration for the handler
that should be added. See :func:`__init__` for details
on valid parameters.
|
def add_handler(self, handler):
handler['logger'] = self._get_logger(handler)
handler['reads'] = 0
handler['data_read'] = 0
self.capture_handlers.append(handler)
| 682,728
|
Remove a handler given a name
Note, if multiple handlers have the same name the last matching
instance in the handler list will be removed.
Args:
name:
The name of the handler to remove
|
def remove_handler(self, name):
index = None
for i, h in enumerate(self.capture_handlers):
if h['name'] == name:
index = i
if index is not None:
self.capture_handlers[index]['logger'].close()
del self.capture_handlers[index]
| 682,729
|
Generate log file path for a given handler
Args:
handler:
The handler configuration dictionary for which a log file
path should be generated.
|
def _get_log_file(self, handler):
if 'file_name_pattern' not in handler:
filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap'
else:
filename = handler['file_name_pattern']
log_file = handler['log_dir']
if 'path' in handler:
log_file = os.path.join(log_file, handler['path'], filename)
else:
log_file = os.path.join(log_file, filename)
log_file = time.strftime(log_file, time.gmtime())
log_file = log_file.format(**handler)
return log_file
| 682,735
|
Remove all handlers with a given name
Args:
name:
The name of the handler(s) to remove.
|
def stop_capture_handler(self, name):
empty_capturers_indeces = []
for k, sc in self._stream_capturers.iteritems():
stream_capturer = sc[0]
stream_capturer.remove_handler(name)
if stream_capturer.handler_count == 0:
self._pool.killone(sc[1])
empty_capturers_indeces.append(k)
for i in empty_capturers_indeces:
del self._stream_capturers[i]
| 682,739
|
Force a rotation of a handler's log file
Args:
name:
The name of the handler whose log file should be rotated.
|
def rotate_capture_handler_log(self, name):
for sc_key, sc in self._stream_capturers.iteritems():
for h in sc[0].capture_handlers:
if h['name'] == name:
sc[0]._rotate_log(h)
| 682,741
|
Return data for handlers of a given name.
Args:
name:
Name of the capture handler(s) to return config data for.
Returns:
Dictionary dump from the named capture handler as given by
the :func:`SocketStreamCapturer.dump_handler_config_data` method.
|
def get_capture_handler_config_by_name(self, name):
handler_confs = []
for address, stream_capturer in self._stream_capturers.iteritems():
handler_data = stream_capturer[0].dump_handler_config_data()
for h in handler_data:
if h['handler']['name'] == name:
handler_confs.append(h)
return handler_confs
| 682,744
|
Invokes each handler in sequence.
Publishes final output data.
Params:
input_data: message received by stream
topic: name of plugin or stream message received from,
if applicable
|
def process(self, input_data, topic=None):
for handler in self.handlers:
output = handler.handle(input_data)
input_data = output
self.publish(input_data)
| 682,855
|
Send a confirm prompt to the GUI
Arguments:
msg (string):
The message to display to the user.
_timeout (int):
The optional amount of time for which the prompt
should be displayed to the user before a timeout occurs.
Defaults to -1 which indicates there is no timeout limit.
|
def confirm(self, msg, _timeout=-1):
return self.msgBox('confirm', _timeout=_timeout, msg=msg)
| 683,155
|
Get value length for a key in rd.
For a key at position pos in the Report Descriptor rd, return the length
of the associated value. This supports both short and long format
values.
Args:
rd: Report Descriptor
pos: The position of the key in rd.
Returns:
(key_size, data_len) where key_size is the number of bytes occupied by
the key and data_len is the length of the value associated with the key.
|
def GetValueLength(rd, pos):
rd = bytearray(rd)
key = rd[pos]
if key == LONG_ITEM_ENCODING:
# If the key is tagged as a long item (0xfe), then the format is
# [key (1 byte)] [data len (1 byte)] [item tag (1 byte)] [data (n bytes)].
# Thus, the entire key record is 3 bytes long.
if pos + 1 < len(rd):
return (3, rd[pos + 1])
else:
raise errors.HidError('Malformed report descriptor')
else:
# If the key is tagged as a short item, then the item tag and data len are
# packed into one byte. The format is thus:
# [tag (high 4 bits)] [type (2 bits)] [size code (2 bits)] [data (n bytes)].
# The size code specifies 1,2, or 4 bytes (0x03 means 4 bytes).
code = key & 0x03
if code <= 0x02:
return (1, code)
elif code == 0x03:
return (1, 4)
raise errors.HidError('Cannot happen')
| 685,519
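A worked example decoding a short-format item: the key byte 0x95 is the HID REPORT_COUNT tag with a size code of 1, so the key occupies one byte and carries a one-byte value:

```python
rd = bytearray([0x95, 0x02])          # Report Count = 2
key_size, data_len = GetValueLength(rd, 0)
print((key_size, data_len))           # (1, 1)
```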
|
Parse the binary report descriptor.
Parse the binary report descriptor into a DeviceDescriptor object.
Args:
rd: The binary report descriptor
desc: The DeviceDescriptor object to update with the results
from parsing the descriptor.
Returns:
None
|
def ParseReportDescriptor(rd, desc):
rd = bytearray(rd)
pos = 0
report_count = None
report_size = None
usage_page = None
usage = None
while pos < len(rd):
key = rd[pos]
# First step, determine the value encoding (either long or short).
key_size, value_length = GetValueLength(rd, pos)
if key & REPORT_DESCRIPTOR_KEY_MASK == INPUT_ITEM:
if report_count and report_size:
byte_length = (report_count * report_size) // 8
desc.internal_max_in_report_len = max(
desc.internal_max_in_report_len, byte_length)
report_count = None
report_size = None
elif key & REPORT_DESCRIPTOR_KEY_MASK == OUTPUT_ITEM:
if report_count and report_size:
byte_length = (report_count * report_size) // 8
desc.internal_max_out_report_len = max(
desc.internal_max_out_report_len, byte_length)
report_count = None
report_size = None
elif key & REPORT_DESCRIPTOR_KEY_MASK == COLLECTION_ITEM:
if usage_page:
desc.usage_page = usage_page
if usage:
desc.usage = usage
elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_COUNT:
if len(rd) >= pos + 1 + value_length:
report_count = ReadLsbBytes(rd, pos + 1, value_length)
elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_SIZE:
if len(rd) >= pos + 1 + value_length:
report_size = ReadLsbBytes(rd, pos + 1, value_length)
elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE_PAGE:
if len(rd) >= pos + 1 + value_length:
usage_page = ReadLsbBytes(rd, pos + 1, value_length)
elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE:
if len(rd) >= pos + 1 + value_length:
usage = ReadLsbBytes(rd, pos + 1, value_length)
pos += value_length + key_size
return desc
| 685,521
|
Fill out the attributes of the device.
Fills the device's HidAttributes and product string
into the descriptor.
Args:
device: A handle to the open device
descriptor: The DeviceDescriptor to populate with the
attributes.
Returns:
None
Raises:
WindowsError when unable to obtain attributes or product
string.
|
def FillDeviceAttributes(device, descriptor):
attributes = HidAttributes()
result = hid.HidD_GetAttributes(device, ctypes.byref(attributes))
if not result:
raise ctypes.WinError()
buf = ctypes.create_string_buffer(1024)
result = hid.HidD_GetProductString(device, buf, 1024)
if not result:
raise ctypes.WinError()
descriptor.vendor_id = attributes.VendorID
descriptor.product_id = attributes.ProductID
descriptor.product_string = ctypes.wstring_at(buf)
| 685,527
|
Fill out device capabilities.
Fills the HidCapabilities of the device into descriptor.
Args:
device: A handle to the open device
descriptor: DeviceDescriptor to populate with the
capabilities
Returns:
None
Raises:
WindowsError when unable to obtain capabilities.
|
def FillDeviceCapabilities(device, descriptor):
preparsed_data = PHIDP_PREPARSED_DATA(0)
ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))
if not ret:
raise ctypes.WinError()
try:
caps = HidCapabilities()
ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))
if ret != HIDP_STATUS_SUCCESS:
raise ctypes.WinError()
descriptor.usage = caps.Usage
descriptor.usage_page = caps.UsagePage
descriptor.internal_max_in_report_len = caps.InputReportByteLength
descriptor.internal_max_out_report_len = caps.OutputReportByteLength
finally:
hid.HidD_FreePreparsedData(preparsed_data)
| 685,528
|
Send an APDU to the device.
Sends an APDU to the device, possibly falling back to the legacy
encoding format that is not ISO7816-4 compatible.
Args:
apdu_to_send: The CommandApdu object to send
Returns:
The ResponseApdu object constructed out of the device's reply.
|
def InternalSendApdu(self, apdu_to_send):
response = None
if not self.use_legacy_format:
response = apdu.ResponseApdu(self.transport.SendMsgBytes(
apdu_to_send.ToByteArray()))
if response.sw1 == 0x67 and response.sw2 == 0x00:
# If we failed using the standard format, retry with the
# legacy format.
self.use_legacy_format = True
return self.InternalSendApdu(apdu_to_send)
else:
response = apdu.ResponseApdu(self.transport.SendMsgBytes(
apdu_to_send.ToLegacyU2FByteArray()))
return response
| 685,547
|
Obtains the unique path for the device.
Args:
device_handle: reference to the device
Returns:
A unique path for the device, obtained from the IO Registry
|
def GetDevicePath(device_handle):
# Obtain device path from IO Registry
io_service_obj = iokit.IOHIDDeviceGetService(device_handle)
str_buffer = ctypes.create_string_buffer(DEVICE_PATH_BUFFER_SIZE)
iokit.IORegistryEntryGetPath(io_service_obj, K_IO_SERVICE_PLANE, str_buffer)
return str_buffer.value
| 685,550
|
Binds a device to the thread's run loop, then starts the run loop.
The HID manager requires a run loop to handle Report reads; this thread
function serves that purpose.
Args:
hid_device: The MacOsHidDevice object
|
def DeviceReadThread(hid_device):
# Schedule device events with run loop
hid_device.run_loop_ref = cf.CFRunLoopGetCurrent()
if not hid_device.run_loop_ref:
logger.error('Failed to get current run loop')
return
iokit.IOHIDDeviceScheduleWithRunLoop(hid_device.device_handle,
hid_device.run_loop_ref,
K_CF_RUNLOOP_DEFAULT_MODE)
# Run the run loop
run_loop_run_result = K_CF_RUN_LOOP_RUN_TIMED_OUT
while (run_loop_run_result == K_CF_RUN_LOOP_RUN_TIMED_OUT or
run_loop_run_result == K_CF_RUN_LOOP_RUN_HANDLED_SOURCE):
run_loop_run_result = cf.CFRunLoopRunInMode(
K_CF_RUNLOOP_DEFAULT_MODE,
1000, # Timeout in seconds
False) # Return after source handled
# log any unexpected run loop exit
if run_loop_run_result != K_CF_RUN_LOOP_RUN_STOPPED:
logger.error('Unexpected run loop exit code: %d', run_loop_run_result)
# Unschedule from run loop
iokit.IOHIDDeviceUnscheduleFromRunLoop(hid_device.device_handle,
hid_device.run_loop_ref,
K_CF_RUNLOOP_DEFAULT_MODE)
| 685,552
|
Initialize the Plugin Manager for Workbench.
Args:
plugin_callback: The callback invoked when a plugin is added.
plugin_dir: The directory where plugins reside.
|
def __init__(self, plugin_callback, plugin_dir = 'workers'):
# Set the callback, the plugin directory and load the plugins
self.plugin_callback = plugin_callback
self.plugin_dir = plugin_dir
self.load_all_plugins()
# Now setup dynamic monitoring of the plugins directory
self.watcher = dir_watcher.DirWatcher(self.plugin_path)
self.watcher.register_callbacks(self.on_created, self.on_modified, self.on_deleted)
self.watcher.start_monitoring()
| 685,736
|
Removing a deleted plugin.
Args:
f: the filepath for the plugin.
|
def remove_plugin(self, f):
if f.endswith('.py'):
plugin_name = os.path.splitext(os.path.basename(f))[0]
print '- %s %sREMOVED' % (plugin_name, color.Red)
print '\t%sNote: still in memory, restart Workbench to remove...%s' % \
(color.Yellow, color.Normal)
| 685,738
|
Adding and verifying plugin.
Args:
f: the filepath for the plugin.
|
def add_plugin(self, f):
if f.endswith('.py'):
# Just the basename without extension
plugin_name = os.path.splitext(os.path.basename(f))[0]
# It's possible the plugin has been modified and needs to be reloaded
if plugin_name in sys.modules:
try:
handler = reload(sys.modules[plugin_name])
print '\t- %s %sRELOAD%s' % (plugin_name, color.Yellow, color.Normal)
except ImportError, error:
print 'Failed to import plugin: %s (%s)' % (plugin_name, error)
return
else:
# Not already loaded so try to import it
try:
handler = __import__(plugin_name, globals(), locals(), [], -1)
except ImportError, error:
print 'Failed to import plugin: %s (%s)' % (plugin_name, error)
return
# Run the handler through plugin validation
plugin = self.validate(handler)
print '\t- %s %sOK%s' % (plugin_name, color.Green, color.Normal)
if plugin:
# Okay must be successfully loaded so capture the plugin meta-data,
# modification time and register the plugin through the callback
plugin['name'] = plugin_name
plugin['dependencies'] = plugin['class'].dependencies
plugin['docstring'] = plugin['class'].__doc__
plugin['mod_time'] = datetime.utcfromtimestamp(os.path.getmtime(f))
# Plugin may accept sample_sets as input
try:
plugin['sample_set_input'] = getattr(plugin['class'], 'sample_set_input')
except AttributeError:
plugin['sample_set_input'] = False
# Now pass the plugin back to workbench
self.plugin_callback(plugin)
| 685,739
|
Validate the plugin. Each plugin must have the following:
1) The worker class must have an execute method: execute(self, input_data).
2) The worker class must have a dependencies list (even if it's empty).
3) The file must have a top level test() method.
Args:
handler: the loaded plugin.
|
def validate(self, handler):
# Check for the test method first
test_method = self.plugin_test_validation(handler)
if not test_method:
return None
# Here we iterate through the classes found in the module and pick
# the first one that satisfies the validation
for name, plugin_class in inspect.getmembers(handler, inspect.isclass):
if self.plugin_class_validation(plugin_class):
return {'class':plugin_class, 'test':test_method}
# If we're here the plugin didn't pass validation
print 'Failure for plugin: %s' % (handler.__name__)
print 'Validation Error: Worker class is required to have a dependencies list and an execute method'
return None
| 685,740
|
Plugin validation.
Every workbench plugin must have top level test method.
Args:
handler: The loaded plugin.
Returns:
None if the test fails or the test function.
|
def plugin_test_validation(self, handler):
methods = {name:func for name, func in inspect.getmembers(handler, callable)}
if 'test' not in methods.keys():
print 'Failure for plugin: %s' % (handler.__name__)
print 'Validation Error: The file must have a top level test() method'
return None
else:
return methods['test']
| 685,741
|
Plugin validation
Every workbench plugin must have a dependencies list (even if it's empty).
Every workbench plugin must have an execute method.
Args:
plugin_class: The loaded plugin class.
Returns:
True if dependencies and execute are present, else False.
|
def plugin_class_validation(self, plugin_class):
try:
getattr(plugin_class, 'dependencies')
getattr(plugin_class, 'execute')
except AttributeError:
return False
return True
| 685,742
|
Initialization for the Workbench data store class.
Args:
uri: Connection String for DataStore backend.
database: Name of database.
worker_cap: MBs in the capped collection.
samples_cap: MBs of sample to be stored.
|
def __init__(self, uri='mongodb://localhost/workbench', database='workbench', worker_cap=0, samples_cap=0):
self.sample_collection = 'samples'
self.worker_cap = worker_cap
self.samples_cap = samples_cap
# Get connection to mongo
self.database_name = database
self.uri = 'mongodb://'+uri+'/'+self.database_name
self.mongo = pymongo.MongoClient(self.uri, use_greenlets=True)
self.database = self.mongo.get_default_database()
# Get the gridfs handle
self.gridfs_handle = gridfs.GridFS(self.database)
# Run the periodic operations
self.last_ops_run = time.time()
self.periodic_ops()
print '\t- WorkBench DataStore connected: %s:%s' % (self.uri, self.database_name)
| 685,748
|
Store a sample into the datastore.
Args:
sample_bytes: Actual bytes of sample.
filename: Name of the file.
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...)
Returns:
md5 digest of the sample.
|
def store_sample(self, sample_bytes, filename, type_tag):
# Temp sanity check for old clients
if len(filename) > 1000:
print 'switched bytes/filename... %s %s' % (sample_bytes[:100], filename[:100])
exit(1)
sample_info = {}
# Compute the MD5 hash
sample_info['md5'] = hashlib.md5(sample_bytes).hexdigest()
# Check if sample already exists
if self.has_sample(sample_info['md5']):
return sample_info['md5']
# Run the periodic operations
self.periodic_ops()
# Check if we need to expire anything
self.expire_data()
# Okay start populating the sample for adding to the data store
# Filename, length, import time and type_tag
sample_info['filename'] = filename
sample_info['length'] = len(sample_bytes)
sample_info['import_time'] = datetime.datetime.utcnow()
sample_info['type_tag'] = type_tag
# Random customer for now
import random
sample_info['customer'] = random.choice(['Mega Corp', 'Huge Inc', 'BearTron', 'Dorseys Mom'])
# Push the file into the MongoDB GridFS
sample_info['__grid_fs'] = self.gridfs_handle.put(sample_bytes)
self.database[self.sample_collection].insert(sample_info)
# Print info
print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap)
# Return the sample md5
return sample_info['md5']
| 685,749
|
Clean data in preparation for serialization.
Deletes items whose key starts with '__' or whose value is a BSON ObjectId,
converts datetime values to ISO strings, and recursively cleans nested
dicts and lists.
Args:
data: Sample data to be serialized.
Returns:
Cleaned data dictionary.
|
def clean_for_serialization(self, data):
if isinstance(data, dict):
for k in data.keys():
if (k.startswith('__')):
del data[k]
elif isinstance(data[k], bson.objectid.ObjectId):
del data[k]
elif isinstance(data[k], datetime.datetime):
data[k] = data[k].isoformat()+'Z'
elif isinstance(data[k], dict):
data[k] = self.clean_for_serialization(data[k])
elif isinstance(data[k], list):
data[k] = [self.clean_for_serialization(item) for item in data[k]]
return data
| 685,753
|
Clean data in preparation for storage.
Deletes the '_id' item, replaces '.' in keys with '_', and recursively
cleans nested dictionaries and lists.
Args:
data: Sample data dictionary to be cleaned.
Returns:
Cleaned data dictionary.
|
def clean_for_storage(self, data):
data = self.data_to_unicode(data)
if isinstance(data, dict):
for k in dict(data).keys():
if k == '_id':
del data[k]
continue
if '.' in k:
new_k = k.replace('.', '_')
data[new_k] = data[k]
del data[k]
k = new_k
if isinstance(data[k], dict):
data[k] = self.clean_for_storage(data[k])
elif isinstance(data[k], list):
data[k] = [self.clean_for_storage(item) for item in data[k]]
return data
| 685,754
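An illustrative before/after, assuming `ds` is a DataStore instance:

```python
data = {'_id': 'abc', 'meta.version': 2, 'nested': {'a.b': 1}}
cleaned = ds.clean_for_storage(data)
# cleaned == {'meta_version': 2, 'nested': {'a_b': 1}}
```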
|
Get the sample from the data store.
This method first fetches the data from datastore, then cleans it for serialization
and then updates it with 'raw_bytes' item.
Args:
md5: The md5 digest of the sample to be fetched from datastore.
Returns:
The sample dictionary or None
|
def get_sample(self, md5):
# Support 'short' md5s but don't waste performance if the full md5 is provided
if len(md5) < 32:
md5 = self.get_full_md5(md5, self.sample_collection)
# Grab the sample
sample_info = self.database[self.sample_collection].find_one({'md5': md5})
if not sample_info:
return None
# Get the raw bytes from GridFS (note: this could fail)
try:
grid_fs_id = sample_info['__grid_fs']
sample_info = self.clean_for_serialization(sample_info)
sample_info.update({'raw_bytes':self.gridfs_handle.get(grid_fs_id).read()})
return sample_info
except gridfs.errors.CorruptGridFile:
# If we don't have the gridfs files, delete the entry from samples
self.database[self.sample_collection].update({'md5': md5}, {'md5': None})
return None
| 685,756
|
Get a window of samples not to exceed size (in MB).
Args:
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...).
size: Size of samples in MBs.
Returns:
a list of md5s.
|
def get_sample_window(self, type_tag, size=10):
# Convert size to MB
size = size * 1024 * 1024
# Grab all the samples of type=type_tag, sort by import_time (newest to oldest)
cursor = self.database[self.sample_collection].find({'type_tag': type_tag},
{'md5': 1,'length': 1}).sort('import_time',pymongo.DESCENDING)
total_size = 0
md5_list = []
for item in cursor:
if total_size > size:
return md5_list
md5_list.append(item['md5'])
total_size += item['length']
# If you get this far you don't have 'size' amount of data
# so just return what you've got
return md5_list
| 685,757
|
Checks if data store has this sample.
Args:
md5: The md5 digest of the required sample.
Returns:
True if sample with this md5 is present, else False.
|
def has_sample(self, md5):
# The easiest thing is to simply get the sample and if that
# succeeds than return True, else return False
sample = self.get_sample(md5)
return True if sample else False
| 685,758
|
List all samples that meet the predicate or all if predicate is not specified.
Args:
predicate: Match samples against this predicate (or all if not specified)
Returns:
List of the md5s for the matching samples
|
def _list_samples(self, predicate=None):
cursor = self.database[self.sample_collection].find(predicate, {'_id':0, 'md5':1})
return [item['md5'] for item in cursor]
| 685,759
|
List all samples that match the tags or all if tags are not specified.
Args:
tags: Match samples against these tags (or all if not specified)
Returns:
List of the md5s for the matching samples
|
def tag_match(self, tags=None):
if 'tags' not in self.database.collection_names():
print 'Warning: Searching on non-existent tags collection'
return None
if not tags:
cursor = self.database['tags'].find({}, {'_id':0, 'md5':1})
else:
cursor = self.database['tags'].find({'tags': {'$in': tags}}, {'_id':0, 'md5':1})
# We have the tags, now make sure we only return those md5 which
# also exist in the samples collection
tag_md5s = set([item['md5'] for item in cursor])
sample_md5s = set(item['md5'] for item in self.database['samples'].find({}, {'_id':0, 'md5':1}))
return list(tag_md5s.intersection(sample_md5s))
| 685,760
|
List of the tags and md5s for all samples
Args:
None
Returns:
List of the tags and md5s for all samples
|
def tags_all(self):
if 'tags' not in self.database.collection_names():
print 'Warning: Searching on non-existent tags collection'
return None
cursor = self.database['tags'].find({}, {'_id':0, 'md5':1, 'tags':1})
return [item for item in cursor]
| 685,761
|
Store the output results of the worker.
Args:
results: a dictionary.
collection: the database collection to store the results in.
md5: the md5 of sample data to be updated.
|
def store_work_results(self, results, collection, md5):
# Make sure the md5 and time stamp is on the data before storing
results['md5'] = md5
results['__time_stamp'] = datetime.datetime.utcnow()
# If the data doesn't have a 'mod_time' field add one now
if 'mod_time' not in results:
results['mod_time'] = results['__time_stamp']
# Fixme: Occasionally a capped collection will not let you update with a
# larger object, if you have MongoDB 2.6 or above this shouldn't
# really happen, so for now just kinda punting and giving a message.
try:
self.database[collection].update({'md5':md5}, self.clean_for_storage(results), True)
except pymongo.errors.OperationFailure:
#self.database[collection].insert({'md5':md5}, self.clean_for_storage(results), True)
print 'Could not update existing object in capped collection, punting...'
print 'collection: %s md5:%s' % (collection, md5)
| 685,762
|
Return a list of all md5s matching the type_tag ('exe','pdf', etc.).
Args:
type_tag: the type of sample.
Returns:
a list of matching samples.
|
def all_sample_md5s(self, type_tag=None):
if type_tag:
cursor = self.database[self.sample_collection].find({'type_tag': type_tag}, {'md5': 1, '_id': 0})
else:
cursor = self.database[self.sample_collection].find({}, {'md5': 1, '_id': 0})
return [match.values()[0] for match in cursor]
| 685,763
|
Convert an elementary datatype to unicode.
Args:
s: the datatype to be unicoded.
Returns:
Unicoded data.
|
def to_unicode(self, s):
# Fixme: This is total horseshit
if isinstance(s, unicode):
return s
if isinstance(s, str):
return unicode(s, errors='ignore')
# Just return the original object
return s
| 685,766
|
Recursively convert a list or dictionary to unicode.
Args:
data: The data to be unicoded.
Returns:
Unicoded data.
|
def data_to_unicode(self, data):
if isinstance(data, dict):
return {self.to_unicode(k): self.to_unicode(v) for k, v in data.iteritems()}
if isinstance(data, list):
return [self.to_unicode(l) for l in data]
else:
return self.to_unicode(data)
| 685,767
|
Initialization for the Elastic Search Indexer.
Args:
hosts: List of connection settings.
|
def __init__(self, hosts=None):
# Set default value for hosts
if not hosts:
hosts = [{"host": "localhost", "port": 9200}]
# Get connection to ElasticSearch
try:
self.els_search = elasticsearch.Elasticsearch(hosts)
info = self.els_search.info()
version = info['version']
print '\t- ELS Indexer connected: %s %s %s %s' % (str(hosts), info['name'],
version['number'], version['lucene_version'])
except elasticsearch.exceptions.ConnectionError:
print '\t- ELS connection failed! Is your ELS server running?'
exit(1)
| 685,782
|
Take an arbitrary dictionary of data and index it with ELS.
Args:
data: data to be Indexed. Should be a dictionary.
index_name: Name of the index.
doc_type: The type of the document.
Raises:
RuntimeError: When the Indexing fails.
|
def index_data(self, data, index_name, doc_type):
# Index the data (which needs to be a dict/object) if it's not
# we're going to toss an exception
if not isinstance(data, dict):
raise RuntimeError('Index failed, data needs to be a dict!')
try:
self.els_search.index(index=index_name, doc_type=doc_type, body=data)
except Exception, error:
print 'Index failed: %s' % str(error)
raise RuntimeError('Index failed: %s' % str(error))
| 685,783
|
Search the given index_name with the given ELS query.
Args:
index_name: Name of the Index
query: The string to be searched.
Returns:
List of results.
Raises:
RuntimeError: When the search query fails.
|
def search(self, index_name, query):
try:
results = self.els_search.search(index=index_name, body=query)
return results
except Exception, error:
error_str = 'Query failed: %s\n' % str(error)
error_str += '\nIs there a dynamic script in the query?, see www.elasticsearch.org'
print error_str
raise RuntimeError(error_str)
| 685,784
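A sketch of a query body in standard Elasticsearch DSL; the index name and field are hypothetical, with `indexer` being an ELSIndexer instance:

```python
query = {'query': {'match': {'customer': 'Mega Corp'}}}
results = indexer.search('workbench_samples', query)
hits = results['hits']['hits']   # list of matching documents
```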
|
Initialize the Framework.
Args:
store_args: Dictionary with keys uri,database,samples_cap, worker_cap.
els_hosts: The address where Elastic Search Indexer is running.
neo_uri: The address where Neo4j is running.
|
def __init__(self, store_args=None, els_hosts=None, neo_uri=None):
# Needs to be replaced by logger
self.VERBOSE = False
# Workbench Server Version
self.version = version.__version__
print '<<< Workbench Server Version %s >>>' % self.version
# Open DataStore
self.data_store = data_store.DataStore(**store_args)
# ELS Indexer
try:
self.indexer = els_indexer.ELSIndexer(**{'hosts': els_hosts} if els_hosts else {})
except SystemExit:
print 'Could not connect to ELS. Is it running?'
self.indexer = els_indexer.ELSStubIndexer(**{'uri': neo_uri} if neo_uri else {})
# Neo4j DB
try:
self.neo_db = neo_db.NeoDB(**{'uri': neo_uri} if neo_uri else {})
except RuntimeError:
print 'Could not connect to Neo4j DB. Is it running? $ neo4j start'
self.neo_db = neo_db.NeoDBStub(**{'uri': neo_uri} if neo_uri else {})
# Create Plugin Manager
self.plugin_meta = {}
plugin_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../workers')
self.plugin_manager = plugin_manager.PluginManager(self._new_plugin, plugin_dir=plugin_dir)
# Store information about commands and workbench
self._store_information()
| 685,791
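A minimal construction sketch, assuming a local MongoDB; the store_args keys mirror the docstring above and the values are illustrative:
store_args = {'uri': 'mongodb://localhost:27017', 'database': 'workbench',
              'samples_cap': 200, 'worker_cap': 100}
server = WorkBench(store_args=store_args)   # falls back to stub indexer/graph DB if ELS or Neo4j is down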
|
Store a sample into the DataStore.
Args:
input_bytes: the actual bytes of the sample e.g. f.read()
filename: name of the file (used purely as meta data not for lookup)
type_tag: ('exe','pcap','pdf','json','swf', or ...)
Returns:
the md5 of the sample.
|
def store_sample(self, input_bytes, filename, type_tag):
# If the sample comes in with an unknown type_tag try to determine it
if type_tag == 'unknown':
print 'Info: Unknown File -- Trying to Determine Type...'
type_tag = self.guess_type_tag(input_bytes, filename)
# Do we have a compressed sample? If so decompress it
if type_tag == 'lz4':
input_bytes = lz4.loads(input_bytes)
# Store the sample
md5 = self.data_store.store_sample(input_bytes, filename, type_tag)
# Add the type_tags to tags
if type_tag != 'lz4':
self.add_tags(md5, type_tag)
return md5
| 685,792
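A usage sketch, assuming `wb` is a WorkBench instance (or a client proxy to one) and 'bad.exe' is an illustrative local file:
with open('bad.exe', 'rb') as f:
    raw_bytes = f.read()
md5 = wb.store_sample(raw_bytes, 'bad.exe', 'exe')
print(md5)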
|
Get a sample from the DataStore.
Args:
md5: the md5 of the sample
Returns:
A dictionary of meta data about the sample which includes
a ['raw_bytes'] key that contains the raw bytes.
Raises:
Workbench.DataNotFound if the sample is not found.
|
def get_sample(self, md5):
# First we try a sample, if we can't find one we try getting a sample_set.
sample = self.data_store.get_sample(md5)
if not sample:
return {'sample_set': {'md5_list': self.get_sample_set(md5)}}
return {'sample': sample}
| 685,793
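A retrieval sketch covering the two possible return shapes above (`wb` and `md5` as in the earlier sketches):
result = wb.get_sample(md5)
if 'sample' in result:
    raw_bytes = result['sample']['raw_bytes']
else:
    md5_list = result['sample_set']['md5_list']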
|
Does the md5 represent a sample_set?
Args:
md5: the md5 of the sample_set
Returns:
True/False
|
def is_sample_set(self, md5):
try:
self.get_sample_set(md5)
return True
except WorkBench.DataNotFound:
return False
| 685,794
|
Get a sample from the DataStore.
Args:
type_tag: the type of samples ('pcap','exe','pdf')
size: the size of the window in MegaBytes (10 = 10MB)
Returns:
A sample_set handle which represents the newest samples within the size window
|
def get_sample_window(self, type_tag, size):
md5_list = self.data_store.get_sample_window(type_tag, size)
return self.store_sample_set(md5_list)
| 685,795
|
Combine samples together. The most significant use case is when a set of sample 'chunks'
has been uploaded and they now need to be combined into a single sample.
Args:
md5_list: The list of md5s to combine, order matters!
filename: name of the file (used purely as meta data not for lookup)
type_tag: ('exe','pcap','pdf','json','swf', or ...)
Returns:
the computed md5 of the combined samples
|
def combine_samples(self, md5_list, filename, type_tag):
total_bytes = ""
for md5 in md5_list:
total_bytes += self.get_sample(md5)['sample']['raw_bytes']
self.remove_sample(md5)
# Store it
return self.store_sample(total_bytes, filename, type_tag)
| 685,796
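A sketch of the chunk-upload use case described in the docstring (`chunks` is an illustrative, already ordered list of byte strings):
chunk_md5s = [wb.store_sample(chunk, 'big.pcap', 'pcap') for chunk in chunks]
combined_md5 = wb.combine_samples(chunk_md5s, 'big.pcap', 'pcap')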
|
Stream the sample by giving back a generator, typically used on 'logs'.
Args:
md5: the md5 of the sample
kwargs: optional dict of streaming options (None for all rows); supports
max_rows: the maximum number of rows to return
Returns:
A generator that yields rows of the file/log
|
def stream_sample(self, md5, kwargs=None):
# Get the max_rows if specified
max_rows = kwargs.get('max_rows', None) if kwargs else None
# Grab the sample and its raw bytes
sample = self.get_sample(md5)['sample']
raw_bytes = sample['raw_bytes']
# Figure out the type of file to be streamed
type_tag = sample['type_tag']
if type_tag == 'bro':
bro_log = bro_log_reader.BroLogReader(convert_datetimes=False)
mem_file = StringIO(raw_bytes)
generator = bro_log.read_log(mem_file)
return generator
elif type_tag == 'els_query':
els_log = json.loads(raw_bytes)
# Try to determine a couple of different types of ELS query results
if 'fields' in els_log['hits']['hits'][0]:
generator = (row['fields'] for row in els_log['hits']['hits'][:max_rows])
else:
generator = (row['_source'] for row in els_log['hits']['hits'][:max_rows])
return generator
elif type_tag == 'log':
generator = ({'row':row} for row in raw_bytes.split('\n')[:max_rows])
return generator
elif type_tag == 'json':
generator = (row for row in json.loads(raw_bytes)[:max_rows])
return generator
else:
raise RuntimeError('Cannot stream file %s with type_tag:%s' % (md5, type_tag))
| 685,797
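A streaming sketch, assuming `log_md5` refers to a previously stored sample with type_tag 'log' or 'bro':
for row in wb.stream_sample(log_md5, {'max_rows': 20}):
    print(row)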
|
Return a dataframe from the DataStore. This is just a convenience method
that uses get_sample internally.
Args:
md5: the md5 of the dataframe
compress: compression to use: (defaults to 'lz4' but can be set to None)
Returns:
A msgpack'd Pandas DataFrame (lz4-compressed unless compress is None)
Raises:
Workbench.DataNotFound if the dataframe is not found.
|
def get_dataframe(self, md5, compress='lz4'):
# First we try a sample, if we can't find one we try getting a sample_set.
sample = self.data_store.get_sample(md5)
if not sample:
raise WorkBench.DataNotFound("Could not find %s in the data store" % md5)
if not compress:
return sample['raw_bytes']
else:
compress_df = lz4.dumps(sample['raw_bytes'])
print 'Info: DataFrame compression %.0f%%' % (len(compress_df)*100.0/float(len(sample['raw_bytes'])))
return compress_df
| 685,798
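A retrieval sketch mirroring the pull_df helper shown later in this listing (assumes the default 'lz4' compression and the same lz4/pandas APIs the original code uses):
import lz4
import pandas as pd
packed = wb.get_dataframe(df_md5)            # lz4-compressed msgpack bytes
df = pd.read_msgpack(lz4.loads(packed))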
|
Index a stored sample with the Indexer.
Args:
md5: the md5 of the sample
index_name: the name of the index
Returns:
Nothing
|
def index_sample(self, md5, index_name):
generator = self.stream_sample(md5)
for row in generator:
self.indexer.index_data(row, index_name)
| 685,803
|
Index worker output with the Indexer.
Args:
worker_name: 'strings', 'pe_features', whatever
md5: the md5 of the sample
index_name: the name of the index
subfield: index just this subfield (None for all)
Returns:
Nothing
|
def index_worker_output(self, worker_name, md5, index_name, subfield):
# Grab the data
if subfield:
data = self.work_request(worker_name, md5)[worker_name][subfield]
else:
data = self.work_request(worker_name, md5)[worker_name]
# Okay now index the data
self.indexer.index_data(data, index_name=index_name, doc_type='unknown')
| 685,804
|
Add a node to the graph with name and labels.
Args:
node_id: the unique node_id e.g. 'www.evil4u.com'
name: the display name of the node e.g. 'evil4u'
labels: a list of labels e.g. ['domain','evil']
Returns:
Nothing
|
def add_node(self, node_id, name, labels):
self.neo_db.add_node(node_id, name, labels)
| 685,805
|
Add a relationship: source, target must already exist (see add_node)
'rel' is the name of the relationship 'contains' or whatever.
Args:
source_id: the unique node_id of the source
target_id: the unique node_id of the target
rel: name of the relationship
Returns:
Nothing
|
def add_rel(self, source_id, target_id, rel):
self.neo_db.add_rel(source_id, target_id, rel)
| 685,806
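A small graph-building sketch using the two calls above (node ids, names, labels, and the relationship name are illustrative):
wb.add_node('www.evil4u.com', 'evil4u', ['domain'])
wb.add_node('10.0.0.5', '10.0.0.5', ['ip'])
wb.add_rel('www.evil4u.com', '10.0.0.5', 'resolves_to')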
|
Clear the Main Database of all samples and worker output.
Args:
None
Returns:
Nothing
|
def clear_db(self):
self.data_store.clear_db()
# Have the plugin manager reload all the plugins
self.plugin_manager.load_all_plugins()
# Store information about commands and workbench
self._store_information()
| 685,807
|
Drops all of the worker output collections
Args:
None
Returns:
Nothing
|
def clear_worker_output(self):
self.data_store.clear_worker_output()
# Have the plugin manager reload all the plugins
self.plugin_manager.load_all_plugins()
# Store information about commands and workbench
self._store_information()
| 685,808
|
Make a work request for an existing stored sample.
Args:
worker_name: 'strings', 'pe_features', whatever
md5: the md5 of the sample (or sample_set!)
subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all)
Returns:
The output of the worker.
|
def work_request(self, worker_name, md5, subkeys=None):
# Pull the worker output
work_results = self._recursive_work_resolver(worker_name, md5)
# Subkeys (Fixme this is super klutzy)
if subkeys:
if isinstance(subkeys, str):
subkeys = [subkeys]
try:
sub_results = {}
for subkey in subkeys:
tmp = work_results[worker_name]
# Traverse any subkeys
for key in subkey.split('.')[:-1]:
tmp = tmp[key]
# Last subkey
key = subkey.split('.')[-1]
if key == '*':
for key in tmp.keys():
sub_results[key] = tmp[key]
else:
sub_results[key] = tmp[key]
# Set the output
work_results = sub_results
except (KeyError, TypeError):
raise RuntimeError('Could not get one or more subkeys for: %s' % (work_results))
# Clean it and ship it
return self.data_store.clean_for_serialization(work_results)
| 685,809
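A work_request sketch; the worker name and subkey are illustrative and not guaranteed to exist in a given install:
full_output = wb.work_request('meta', md5)
file_size = wb.work_request('meta', md5, 'length')   # returns {'length': ...}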
|
Make a work request for an existing stored sample (or sample_set).
Args:
worker_name: 'strings', 'pe_features', whatever
sample_set: the md5 of a sample_set in the Workbench data store
subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all)
Returns:
A generator that yields the worker output for each sample in the sample_set
|
def set_work_request(self, worker_name, sample_set, subkeys=None):
# Does worker support sample_set_input?
if self.plugin_meta[worker_name]['sample_set_input']:
yield self.work_request(worker_name, sample_set, subkeys)
# Loop through all the md5s and return a generator with yield
else:
md5_list = self.get_sample_set(sample_set)
for md5 in md5_list:
if subkeys:
yield self.work_request(worker_name, md5, subkeys)
else:
yield self.work_request(worker_name, md5)[worker_name]
| 685,810
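A generator-consumption sketch for a sample_set (the worker name is illustrative; `sample_set_md5` comes from store_sample_set or get_sample_window):
for output in wb.set_work_request('meta', sample_set_md5):
    print(output)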
|
Store a sample set (which is just a list of md5s).
Note: All md5s must already be in the data store.
Args:
md5_list: a list of the md5s in this set (all must exist in data store)
Returns:
The md5 of the set (computed from the list of member md5s)
|
def store_sample_set(self, md5_list):
# Sanity check
if not md5_list:
print 'Warning: Trying to store an empty sample_set'
return None
# Remove any duplicates
md5_list = list(set(md5_list))
for md5 in md5_list:
if not self.has_sample(md5):
raise RuntimeError('%s: Not found! All items in sample_set must be in the datastore' % (md5))
set_md5 = hashlib.md5(str(md5_list)).hexdigest()
self._store_work_results({'md5_list':md5_list}, 'sample_set', set_md5)
return set_md5
| 685,811
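A sketch tying the set calls together (the member md5s must already be stored, as the docstring notes):
set_md5 = wb.store_sample_set([md5_a, md5_b, md5_c])
members = wb.get_sample_set(set_md5)         # gives back the de-duplicated md5 list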
|
Generate a sample_set that matches the tags, or all samples if tags are not specified.
Args:
tags: Match samples against this tag list (or all if not specified)
Returns:
The sample_set of those samples matching the tags
|
def generate_sample_set(self, tags=None):
if isinstance(tags, str):
tags = [tags]
md5_list = self.data_store.tag_match(tags)
return self.store_sample_set(md5_list)
| 685,812
|
Load a sample (or samples) into workbench
Args:
file_path: path to a file or directory
tags (optional): a list of tags for the sample/samples ['bad','aptz13']
Returns:
The list of md5s for all samples
|
def load_sample(self, file_path, tags=None):
# Recommend a tag
if not tags:
print '\n%sRequired: Add a list of tags when you load samples (put \'unknown\' if you must). \
\n\t%sExamples: [\'bad\'], [\'good\'], [\'bad\',\'aptz13\']%s' % (color.Yellow, color.Green, color.Normal)
return
# Do they want everything under a directory?
if os.path.isdir(file_path):
file_list = self._all_files_in_directory(file_path)
else:
file_list = [file_path]
# Upload the files into workbench
md5_list = []
for path in file_list:
with open(path, 'rb') as my_file:
raw_bytes = my_file.read()
md5 = hashlib.md5(raw_bytes).hexdigest()
if not self.workbench.has_sample(md5):
print '%sStreaming Sample...%s' % (color.LightPurple, color.Normal)
basename = os.path.basename(path)
md5 = self.streamer.stream_to_workbench(raw_bytes, basename, 'unknown', tags)
print '\n%s %s%s %sLocked and Loaded...%s\n' % \
(self.beer, color.LightPurple, md5[:6], color.Yellow, color.Normal)
# Add tags to the sample
self.workbench.add_tags(md5, tags)
md5_list.append(md5)
# Pivot on the sample_set
set_md5 = self.workbench.store_sample_set(md5_list)
self.pivot(set_md5, '_'.join(tags))
# Dump out tag information
self.tags()
| 685,830
|
Pivot on an md5 (md5 can be a single sample or a sample_set)
Args:
md5: The md5 can be a single sample or a sample_set
tag (optional): a tag for the sample (used in the prompt)
Returns:
Nothing, but it sets the active sample/sample_set
|
def pivot(self, md5, tag=''):
# Is the md5 a tag?
ss = self.workbench.generate_sample_set(md5)
if ss:
tag = md5 if not tag else tag
md5 = ss
# Is the md5 a sample_set?
if self.workbench.is_sample_set(md5):
# Is the sample_set one sample?
ss = self.workbench.get_sample_set(md5)
if len(ss) == 1:
md5 = ss[0]
deco = '(%s:%d)' % (tag, len(ss))
self.ipshell.push({'prompt_deco': deco})
else:
deco = '(%s:1)' % tag
self.ipshell.push({'prompt_deco': deco})
# Set the new md5
self.session.md5 = md5
self.session.short_md5 = md5[:6]
self.ipshell.push({'md5': self.session.md5})
self.ipshell.push({'short_md5': self.session.short_md5})
| 685,831
|
Wrapper for the Workbench get_dataframe method
Args:
md5: pull the dataframe identified by this md5
Returns:
The uncompressed/unserialized dataframe
|
def pull_df(self, md5):
try:
_packed_df = self.workbench.get_dataframe(md5)
_df = pd.read_msgpack(lz4.loads(_packed_df))
return _df
except zerorpc.exceptions.RemoteError as e:
return repr_to_str_decorator.r_to_s(self._data_not_found)(e)
| 685,833
|
Wrapper for the Workbench search method
Args:
tags: a single tag 'pcap' or a list of tags to search for ['bad','aptz13']
Returns:
A sample_set that contains the md5s for all matching samples
|
def search(self, tags=None):
if isinstance(tags, str):
tags = [tags]
return self.workbench.generate_sample_set(tags)
| 685,837
|
Deserialize ``s`` to a BioC collection object.
Args:
s: a "str" instance containing a BioC collection
Returns:
an object of BioCCollection
|
def decodes(self, s: str) -> BioCCollection:
tree = etree.parse(io.BytesIO(bytes(s, encoding='UTF-8')))
collection = self.__parse_collection(tree.getroot())
collection.encoding = tree.docinfo.encoding
collection.standalone = tree.docinfo.standalone
collection.version = tree.docinfo.xml_version
return collection
| 685,846
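A hedged usage sketch, where `decoder` stands for an instance of the class that defines decodes/decode above, 'collection.xml' is an illustrative file, and the collection attributes follow standard BioC conventions:
with open('collection.xml', encoding='utf-8') as fp:
    collection = decoder.decodes(fp.read())
print(collection.version, len(collection.documents))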
|
Deserialize ``fp`` to a BioC collection object.
Args:
fp: a ``.read()``-supporting file-like object containing a BioC collection
Returns:
an object of BioCCollection
|
def decode(self, fp: TextIO) -> BioCCollection:
# utf8_parser = etree.XMLParser(encoding='utf-8')
tree = etree.parse(fp)
collection = self.__parse_collection(tree.getroot())
collection.encoding = tree.docinfo.encoding
collection.standalone = tree.docinfo.standalone
collection.version = tree.docinfo.xml_version
return collection
| 685,847
|
Encode and write a single object.
Args:
obj: an instance of BioCDocument, BioCPassage, or BioCSentence
Returns:
Nothing
|
def write(self, obj: BioCDocument or BioCPassage or BioCSentence):
if self.level == DOCUMENT and not isinstance(obj, BioCDocument):
raise ValueError
if self.level == PASSAGE and not isinstance(obj, BioCPassage):
raise ValueError
if self.level == SENTENCE and not isinstance(obj, BioCSentence):
raise ValueError
self.writer.write(BioCJSONEncoder().default(obj))
| 685,879
|
Get the first node with the given role.
Args:
role: the role to look for
default: the value returned if no node has the given role
Returns:
the first node with the given role, or default if none is found
|
def get_node(self, role: str, default=None) -> BioCNode:
return next((node for node in self.nodes if node.role == role), default)
| 685,960
|
Get the sentence with the specified offset.
Args:
offset: sentence offset
Returns:
the sentence with the specified offset, or None if no sentence matches
|
def get_sentence(self, offset: int) -> BioCSentence or None:
for sentence in self.sentences:
if sentence.offset == offset:
return sentence
return None
| 685,965
|