docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Associate the specified timeline and sketch.
Args:
sketch_id (int): ID of sketch
index_id (int): ID of timeline to add to sketch
|
def add_timeline_to_sketch(self, sketch_id, index_id):
    """Attach a timeline to an existing sketch.

    Args:
        sketch_id (int): ID of the sketch.
        index_id (int): ID of the timeline to add to the sketch.
    """
    endpoint = '{0:s}/sketches/{1:d}/timelines/'.format(
        self.api_base_url, sketch_id)
    payload = {'timeline': [index_id]}
    self.session.post(endpoint, json=payload)
| 372,103
|
Get information on the specified sketch.
Args:
sketch_id (int): ID of sketch
Returns:
dict: Dictionary of sketch information
Raises:
ValueError: Sketch is inaccessible
|
def get_sketch(self, sketch_id):
    """Get information on the specified sketch.

    Args:
        sketch_id (int): ID of the sketch.

    Returns:
        dict: Dictionary of sketch information.

    Raises:
        ValueError: If the sketch does not exist or is inaccessible.
    """
    resource_url = '{0:s}/sketches/{1:d}/'.format(self.api_base_url, sketch_id)
    response = self.session.get(resource_url)
    response_dict = response.json()
    # A valid sketch payload carries an 'objects' key; its absence means the
    # sketch is missing or access was denied.  A direct membership test is
    # clearer than the previous try/except that discarded the looked-up value.
    if 'objects' not in response_dict:
        raise ValueError('Sketch does not exist or you have no access')
    return response_dict
| 372,104
|
Copies the attribute container from a dictionary.
Args:
attributes (dict[str, object]): attribute values per name.
|
def copy_from_dict(self, attributes):
    """Set public attributes on this container from a mapping.

    Args:
        attributes (dict[str, object]): attribute values per name; names
            starting with an underscore are skipped.
    """
    public_items = (
        (name, value) for name, value in attributes.items()
        # Single-character compare instead of startswith, for speed.
        if name[0] != '_'
    )
    for name, value in public_items:
        setattr(self, name, value)
| 372,105
|
Sets up the _keywords attribute.
Args:
keywords: pipe separated list of keyword to search
|
def setup(self, keywords=None): # pylint: disable=arguments-differ
self._keywords = keywords
self._output_path = tempfile.mkdtemp()
| 372,110
|
Parse PDF files text content for keywords.
Args:
path: PDF file path.
Returns:
match: set of unique occurrences of every match.
|
def grepPDF(self, path):
    """Search a PDF file's extracted text for the configured keywords.

    Args:
        path: PDF file path.

    Returns:
        set: unique, lowercased occurrences of every keyword match.
    """
    matches = set()
    page_texts = []
    with open(path, 'rb') as pdf_file_obj:
        pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
        for page_number in range(pdf_reader.numPages):
            page_texts.append(pdf_reader.getPage(page_number).extractText())
        # Pages are joined with newlines, matching the original accumulation.
        full_text = '\n' + '\n'.join(page_texts)
        found = re.findall(self._keywords, full_text, re.IGNORECASE)
        matches.update(hit.lower() for hit in found)
    return matches
| 372,112
|
Populates the internal module pool with modules declared in a recipe.
Args:
recipe: Dict, recipe declaring modules to load.
|
def load_recipe(self, recipe):
    """Populate the internal module pool with modules declared in a recipe.

    Args:
        recipe: dict describing the recipe, containing a 'modules' list.
    """
    self.recipe = recipe
    for description in recipe['modules']:
        name = description['name']
        module_class = self.config.get_module(name)
        # Module constructors take the state object (self) as sole argument.
        self._module_pool[name] = module_class(self)
| 372,114
|
Thread-safe method to store data in the state's store.
Args:
container (containers.interface.AttributeContainer): The data to store.
|
def store_container(self, container):
    """Thread-safely append a container to the state's store.

    Args:
        container (containers.interface.AttributeContainer): the data to
            store, bucketed by its CONTAINER_TYPE.
    """
    with self._store_lock:
        bucket = self.store.setdefault(container.CONTAINER_TYPE, [])
        bucket.append(container)
| 372,115
|
Thread-safe method to retrieve data from the state's store.
Args:
container_class: AttributeContainer class used to filter data.
Returns:
A list of AttributeContainer objects of matching CONTAINER_TYPE.
|
def get_containers(self, container_class):
    """Thread-safely retrieve stored containers of a given type.

    Args:
        container_class: AttributeContainer subclass used to filter data.

    Returns:
        list: containers whose CONTAINER_TYPE matches, [] when none stored.
    """
    container_type = container_class.CONTAINER_TYPE
    with self._store_lock:
        try:
            return self.store[container_type]
        except KeyError:
            return []
| 372,116
|
Performs setup tasks for each module in the module pool.
Threads declared modules' setup() functions. Takes CLI arguments into
account when replacing recipe parameters for each module.
Args:
args: Command line arguments that will be used to replace the parameters
declared in the recipe.
|
def setup_modules(self, args):
    """Performs setup tasks for each module in the module pool.

    Threads declared modules' setup() functions. Takes CLI arguments into
    account when replacing recipe parameters for each module.

    Args:
        args: Command line arguments that will be used to replace the
            parameters declared in the recipe.
    """
    def _setup_module_thread(module_description):
        # Substitute recipe placeholders with the provided CLI arguments.
        new_args = utils.import_args_from_dict(
            module_description['args'], vars(args), self.config)
        module = self._module_pool[module_description['name']]
        try:
            module.setup(**new_args)
        except Exception as error:  # pylint: disable=broad-except
            # Record any setup failure as critical so check_errors() below
            # aborts the whole run.
            self.add_error(
                'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
                    error, traceback.format_exc()),
                critical=True)
        # NOTE(review): per-module Event objects are presumably consumed
        # elsewhere to sequence module execution — confirm against callers.
        self.events[module_description['name']] = threading.Event()
        self.cleanup()
    # Run every module's setup concurrently, one thread per module.
    threads = []
    for module_description in self.recipe['modules']:
        t = threading.Thread(
            target=_setup_module_thread,
            args=(module_description, )
        )
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    # Abort the run if any module reported a critical setup error.
    self.check_errors(is_global=True)
| 372,117
|
Adds an error to the state.
Args:
error: The text that will be added to the error list.
critical: If set to True and the error is checked with check_errors,
dfTimewolf will abort.
|
def add_error(self, error, critical=False):
    """Record an error on the state.

    Args:
        error: text that will be added to the error list.
        critical: when True, check_errors() will abort the run on this error.
    """
    entry = (error, critical)
    self.errors.append(entry)
| 372,119
|
Checks for errors and exits if any of them are critical.
Args:
is_global: If True, check the global_errors attribute. If False, check the
errors attribute.
|
def check_errors(self, is_global=False):
    """Print recorded errors and exit if any of them is critical.

    Args:
        is_global: check the global_errors attribute when True, otherwise
            the errors attribute.
    """
    error_list = self.global_errors if is_global else self.errors
    if not error_list:
        return
    print('dfTimewolf encountered one or more errors:')
    for message, critical in error_list:
        prefix = 'CRITICAL: ' if critical else ''
        print('{0:s} {1:s}'.format(prefix, message))
        if critical:
            # Stop at the first critical error, like the original loop.
            print('Critical error found. Aborting.')
            sys.exit(-1)
| 372,121
|
Initialize the Turbinia artifact processor object.
Args:
state: The dfTimewolf state object
|
def __init__(self, state):
    """Initialize the Turbinia artifact processor object.

    Args:
        state: the dfTimewolf state object.
    """
    super(TurbiniaProcessor, self).__init__(state)
    # All of these are populated later by setup().
    for attribute in ('client', 'disk_name', 'instance', 'project',
                      'turbinia_region', 'turbinia_zone', '_output_path'):
        setattr(self, attribute, None)
| 372,122
|
Sets up the object attributes.
Args:
disk_name (string): Name of the disk to process
project (string): The project containing the disk to process
turbinia_zone (string): The zone containing the disk to process
|
def setup(self, disk_name, project, turbinia_zone):
    """Sets up the object attributes.

    Args:
        disk_name (string): Name of the disk to process.
        project (string): The project containing the disk to process.
        turbinia_zone (string): The zone containing the disk to process.
    """
    # TODO: Consider the case when multiple disks are provided by the previous
    # module or by the CLI.
    if project is None or turbinia_zone is None:
        self.state.add_error(
            'project or turbinia_zone are not all specified, bailing out',
            critical=True)
        return
    self.disk_name = disk_name
    self.project = project
    self.turbinia_zone = turbinia_zone
    try:
        turbinia_config.LoadConfig()
        self.turbinia_region = turbinia_config.TURBINIA_REGION
        self.instance = turbinia_config.PUBSUB_TOPIC
        # Turbinia can only process disks living in its own configured
        # project; a mismatch is a critical, user-fixable error.
        if turbinia_config.PROJECT != self.project:
            self.state.add_error(
                'Specified project {0:s} does not match Turbinia configured '
                'project {1:s}. Use gcp_turbinia_import recipe to copy the disk '
                'into the same project.'.format(
                    self.project, turbinia_config.PROJECT), critical=True)
            return
        self._output_path = tempfile.mkdtemp()
        self.client = turbinia_client.TurbiniaClient()
    except TurbiniaException as e:
        # Configuration or client construction errors are fatal here.
        self.state.add_error(e, critical=True)
        return
| 372,123
|
Pretty-prints task data.
Args:
task: Task dict generated by Turbinia.
|
def _print_task_data(self, task):
print(' {0:s} ({1:s})'.format(task['name'], task['id']))
paths = task.get('saved_paths', [])
if not paths:
return
for path in paths:
if path.endswith('worker-log.txt'):
continue
if path.endswith('{0:s}.log'.format(task.get('id'))):
continue
if path.startswith('/'):
continue
print(' ' + path)
| 372,124
|
Displays the overall progress of tasks in a Turbinia job.
Args:
instance (string): The name of the Turbinia instance
project (string): The project containing the disk to process
region (string): Region where turbinia is configured.
request_id (string): The request ID provided by Turbinia.
user (string): The username to filter tasks by.
poll_interval (int): The interval at which to poll for new results.
|
def display_task_progress(
    self, instance, project, region, request_id=None, user=None,
    poll_interval=60):
    """Displays the overall progress of tasks in a Turbinia job.

    Polls Turbinia and prints a status block whenever the set of completed
    tasks grows; returns once every task of the request has completed.

    Args:
        instance (string): The name of the Turbinia instance.
        project (string): The project containing the disk to process.
        region (string): Region where turbinia is configured.
        request_id (string): The request ID provided by Turbinia.
        user (string): The username to filter tasks by.
        poll_interval (int): The interval at which to poll for new results.
    """
    total_completed = 0
    while True:
        task_results = self.client.get_task_data(
            instance, project, region, request_id=request_id, user=user)
        # Index tasks by ID for the lookups below.
        tasks = {task['id']: task for task in task_results}
        completed_tasks = set()
        pending_tasks = set()
        for task in tasks.values():
            # 'successful' stays None while a task is still running.
            if task.get('successful') is not None:
                completed_tasks.add(task['id'])
            else:
                pending_tasks.add(task['id'])
        # Only print when progress was made (or nothing completed yet).
        if len(completed_tasks) > total_completed or not completed_tasks:
            total_completed = len(completed_tasks)
            print('Task status update (completed: {0:d} | pending: {1:d})'.format(
                len(completed_tasks), len(pending_tasks)))
            print('Completed tasks:')
            for task_id in completed_tasks:
                self._print_task_data(tasks[task_id])
            print('Pending tasks:')
            for task_id in pending_tasks:
                self._print_task_data(tasks[task_id])
        if len(completed_tasks) == len(task_results) and completed_tasks:
            print('All {0:d} Tasks completed'.format(len(task_results)))
            return
        time.sleep(poll_interval)
| 372,125
|
Initializes the Threat Intelligence container.
Args:
name (string): name of the threat
indicator (string): regular expression relevant to a threat
|
def __init__(self, name, indicator):
    """Initialize the Threat Intelligence container.

    Args:
        name (string): name of the threat.
        indicator (string): regular expression relevant to the threat.
    """
    super(ThreatIntelligence, self).__init__()
    self.indicator = indicator
    self.name = name
| 372,132
|
Sets up the _target_directory attribute.
Args:
target_directory: Directory in which collected files will be dumped.
|
def setup(self, target_directory=None): # pylint: disable=arguments-differ
self._target_directory = target_directory
if not target_directory:
self._target_directory = tempfile.mkdtemp()
elif not os.path.exists(target_directory):
try:
os.makedirs(target_directory)
except OSError as exception:
message = 'An unknown error occurred: {0!s}'.format(exception)
self.state.add_error(message, critical=True)
| 372,134
|
Recursively copies files from source to destination_directory.
Args:
source: source file or directory to copy into destination_directory
destination_directory: destination directory in which to copy source
|
def _copy_file_or_directory(self, source, destination_directory):
if os.path.isdir(source):
for item in os.listdir(source):
full_source = os.path.join(source, item)
full_destination = os.path.join(destination_directory, item)
shutil.copytree(full_source, full_destination)
else:
shutil.copy2(source, destination_directory)
| 372,136
|
Initialize the base collector object.
Args:
state: a DFTimewolfState object.
critical: Whether the module is critical or not. If True and the module
encounters an error, then the whole recipe will fail.
|
def __init__(self, state, critical=False):
    """Initialize the base collector object.

    Args:
        state: a DFTimewolfState object.
        critical: whether the module is critical. If True and the module
            encounters an error, the whole recipe fails.
    """
    super(BaseModule, self).__init__()
    self.state = state
    self.critical = critical
| 372,137
|
Checks if any values in a given dictionary still contain @ parameters.
Args:
value: Dictionary, list, or string that will be recursively checked for
placeholders
Raises:
ValueError: There still exists a value with an @ parameter.
Returns:
Top-level caller: a modified dict with replaced tokens.
Recursive caller: a modified object with replaced tokens.
|
def check_placeholders(value):
    """Recursively verify no '@' placeholder tokens remain in value.

    Args:
        value: dict, list, tuple or string that is checked recursively for
            placeholders.

    Raises:
        ValueError: a string value still contains an '@' parameter.

    Returns:
        The same structure with every nested value checked.
    """
    if isinstance(value, six.string_types):
        if TOKEN_REGEX.search(value):
            raise ValueError('{0:s} must be replaced in dictionary'.format(value))
        return value
    if isinstance(value, dict):
        return {key: check_placeholders(item) for key, item in value.items()}
    if isinstance(value, list):
        return [check_placeholders(item) for item in value]
    if isinstance(value, tuple):
        return tuple(check_placeholders(item) for item in value)
    return value
| 372,139
|
Returns a foreign function exported by `zbar`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
cddl.CFunctionType: A wrapper around the function.
|
def zbar_function(fname, restype, *args):
    """Return a foreign function exported by zbar.

    Args:
        fname (str): name of the exported function.
        restype: ctypes primitive return type.
        *args: ctypes primitive argument types.

    Returns:
        A ctypes function wrapper bound to the loaded libzbar.
    """
    return CFUNCTYPE(restype, *args)((fname, load_libzbar()))
| 372,417
|
Computes the bounding box of an iterable of (x, y) coordinates.
Args:
locations: iterable of (x, y) tuples.
Returns:
`Rect`: Coordinates of the bounding box.
|
def bounding_box(locations):
    """Compute the bounding box of an iterable of (x, y) coordinates.

    Args:
        locations: iterable of (x, y) tuples.

    Returns:
        Rect: coordinates of the bounding box.
    """
    # Materialize once so single-pass iterables (generators) work; the
    # previous implementation iterated `locations` twice and failed on them.
    points = list(locations)
    xs = [x for x, _ in points]
    ys = [y for _, y in points]
    left, right = min(xs), max(xs)
    top, bottom = min(ys), max(ys)
    return Rect(left, top, right - left, bottom - top)
| 372,418
|
Computes the convex hull of an iterable of (x, y) coordinates.
Args:
points: iterable of (x, y) tuples.
Returns:
`list`: instances of `Point` - vertices of the convex hull in
counter-clockwise order, starting from the vertex with the
lexicographically smallest coordinates.
Andrew's monotone chain algorithm. O(n log n) complexity.
https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
|
def convex_hull(points):
    """Computes the convex hull of an iterable of (x, y) coordinates.

    Andrew's monotone chain algorithm. O(n log n) complexity.
    https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain

    Args:
        points: iterable of (x, y) tuples.

    Returns:
        list: instances of `Point` - vertices of the convex hull in
            counter-clockwise order, starting from the vertex with the
            lexicographically smallest coordinates.
    """
    def is_not_clockwise(p0, p1, p2):
        # Cross product of (p1 - p0) and (p2 - p0); >= 0 means the turn
        # through p1 is counter-clockwise or collinear.
        return 0 <= (
            (p1[0] - p0[0]) * (p2[1] - p0[1]) -
            (p1[1] - p0[1]) * (p2[0] - p0[0])
        )
    def go(points_):
        # Build one half of the hull: pop points that would create a
        # non-clockwise turn before appending the next point.
        res = []
        for p in points_:
            while 1 < len(res) and is_not_clockwise(res[-2], res[-1], p):
                res.pop()
            res.append(p)
        # The last point in each list is the first point in the other list
        res.pop()
        return res
    # Discard duplicates and sort by x then y
    points = sorted(set(points))
    # Algorithm needs at least two points
    hull = (
        points if len(points) < 2 else chain(go(points), go(reversed(points)))
    )
    return list(map(Point._make, hull))
| 372,419
|
Generator of symbols.
Args:
image: `zbar_image`
Yields:
POINTER(zbar_symbol): Symbol
|
def _symbols_for_image(image):
    """Yield each decoded symbol attached to a zbar image.

    Args:
        image: `zbar_image` pointer.

    Yields:
        POINTER(zbar_symbol): the next symbol in the image's symbol list.
    """
    current = zbar_image_first_symbol(image)
    while current:
        yield current
        current = zbar_symbol_next(current)
| 372,420
|
Generator of decoded symbol information.
Args:
symbols: iterable of instances of `POINTER(zbar_symbol)`
Yields:
Decoded: decoded symbol
|
def _decode_symbols(symbols):
    """Yield a Decoded entry for every zbar symbol.

    Args:
        symbols: iterable of instances of `POINTER(zbar_symbol)`.

    Yields:
        Decoded: data, type, bounding rect and polygon of each symbol.
    """
    for symbol in symbols:
        # The 'type' int is a value in the ZBarSymbol enumeration.
        symbol_type = ZBarSymbol(symbol.contents.type).name
        data = string_at(zbar_symbol_get_data(symbol))
        locations = (
            (zbar_symbol_get_loc_x(symbol, index),
             zbar_symbol_get_loc_y(symbol, index))
            for index in _RANGEFN(zbar_symbol_get_loc_size(symbol))
        )
        polygon = convex_hull(locations)
        yield Decoded(
            data=data,
            type=symbol_type,
            rect=bounding_box(polygon),
            polygon=polygon
        )
| 372,421
|
Decodes datamatrix barcodes in `image`.
Args:
image: `numpy.ndarray`, `PIL.Image` or tuple (pixels, width, height)
symbols: iter(ZBarSymbol) the symbol types to decode; if `None`, uses
`zbar`'s default behaviour, which is to decode all symbol types.
Returns:
:obj:`list` of :obj:`Decoded`: The values decoded from barcodes.
|
def decode(image, symbols=None):
    """Decodes datamatrix barcodes in `image`.

    Args:
        image: `numpy.ndarray`, `PIL.Image` or tuple (pixels, width, height).
        symbols: iter(ZBarSymbol) the symbol types to decode; if `None`, uses
            `zbar`'s default behaviour, which is to decode all symbol types.

    Returns:
        :obj:`list` of :obj:`Decoded`: The values decoded from barcodes.

    Raises:
        PyZbarError: if zbar reports an unsupported image format.
    """
    pixels, width, height = _pixel_data(image)
    results = []
    with _image_scanner() as scanner:
        if symbols:
            # Disable all but the symbols of interest
            disable = set(ZBarSymbol).difference(symbols)
            for symbol in disable:
                zbar_image_scanner_set_config(
                    scanner, symbol, ZBarConfig.CFG_ENABLE, 0
                )
            # I think it likely that zbar will detect all symbol types by
            # default, in which case enabling the types of interest is
            # redundant but it seems sensible to be over-cautious and enable
            # them.
            for symbol in symbols:
                zbar_image_scanner_set_config(
                    scanner, symbol, ZBarConfig.CFG_ENABLE, 1
                )
        with _image() as img:
            # NOTE(review): 'L800' is presumably 8-bpp greyscale, so `pixels`
            # must already be in that layout — confirm against zbar docs.
            zbar_image_set_format(img, _FOURCC['L800'])
            zbar_image_set_size(img, width, height)
            zbar_image_set_data(img, cast(pixels, c_void_p), len(pixels), None)
            decoded = zbar_scan_image(scanner, img)
            if decoded < 0:
                raise PyZbarError('Unsupported image format')
            else:
                results.extend(_decode_symbols(_symbols_for_image(img)))
    return results
| 372,423
|
Estimate an approximation of the ratio of stationary over empirical distribution from the basis.
Parameters:
-----------
K : ndarray(M+1, M+1),
time-lagged correlation matrix for the whitened and padded data set.
Returns:
--------
u : ndarray(M,)
coefficients of the ratio stationary / empirical dist. from the whitened and expanded basis.
|
def _compute_u(K):
    """Estimate the ratio of stationary over empirical distribution.

    Args:
        K (ndarray(M+1, M+1)): time-lagged correlation matrix for the
            whitened and padded data set.

    Returns:
        ndarray(M+1,): coefficients u of the ratio stationary / empirical
            distribution in the whitened and expanded basis.
    """
    M = K.shape[0] - 1
    # Left eigenvectors of K are the right eigenvectors of K.T:
    eigenvalues, eigenvectors = scl.eig(K.T)
    eigenvalues, eigenvectors = sort_by_norm(eigenvalues, eigenvectors)
    # Take the leading eigenvector (eigenvalue one) and normalize so the
    # last coefficient is one.  np.dot(u, e_M) is simply u[M], so the
    # explicit unit vector of the original implementation is unnecessary.
    u = np.real(eigenvectors[:, 0])
    return u / u[M]
| 372,587
|
Return covariance matrix:
Parameters:
-----------
bessel : bool, optional, default=True
Use Bessel's correction in order to
obtain an unbiased estimator of sample covariances.
|
def covar(self, bessel=True):
    """Return the covariance matrix.

    Args:
        bessel (bool, optional, default=True): use Bessel's correction
            (divide by w - 1) for an unbiased estimator of sample
            covariances; otherwise divide by w.
    """
    denominator = self.w - 1 if bessel else self.w
    return self.Mxy / denominator
| 372,735
|
start a drag operation with a PandasCellPayload on defined index.
Args:
index (QModelIndex): model index you want to start the drag operation.
|
def startDrag(self, index):
    """Start a drag operation with a PandasCellPayload on defined index.

    Args:
        index (QModelIndex): model index you want to start the drag operation.
    """
    if not index.isValid():
        return
    dataFrame = self.model().dataFrame()
    # get all infos from dataFrame
    dfindex = dataFrame.iloc[[index.row()]].index
    columnName = dataFrame.columns[index.column()]
    dtype = dataFrame[columnName].dtype
    value = dataFrame[columnName][dfindex]
    # create the mime data
    # NOTE(review): hex(id(model)) presumably lets the drop target recognize
    # payloads originating from its own model — confirm against drop handler.
    mimePayload = PandasCellPayload(
        dfindex,
        columnName,
        value,
        dtype,
        hex(id(self.model()))
    )
    mimeData = MimeData()
    mimeData.setData(mimePayload)
    # create the drag icon and start drag operation
    drag = QtGui.QDrag(self)
    drag.setMimeData(mimeData)
    pixmap = QtGui.QPixmap(":/icons/insert-table.png")
    drag.setHotSpot(QtCore.QPoint(pixmap.width()/3, pixmap.height()/3))
    drag.setPixmap(pixmap)
    result = drag.start(Qt.MoveAction)
| 373,440
|
Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
iconSize (QSize, optional): Size of edit buttons. Defaults to QSize(36, 36).
|
def __init__(self, parent=None, iconSize=None):
    """Constructs the object with the given parent.

    Args:
        parent (QObject, optional): Causes the objected to be owned
            by `parent` instead of Qt. Defaults to `None`.
        iconSize (QSize, optional): Size of edit buttons. Defaults to
            QSize(36, 36).
    """
    super(DataTableWidget, self).__init__(parent)
    # A QSize default argument would be a single mutable instance shared by
    # every widget; build a fresh one per instance instead.
    self._iconSize = QtCore.QSize(36, 36) if iconSize is None else iconSize
    self.initUi()
| 373,442
|
Enable the editing buttons to add/remove rows/columns and to edit the data.
This method is also a slot.
In addition, the data of model will be made editable,
if the `enabled` parameter is true.
Args:
enabled (bool): This flag indicates, if the buttons
shall be activated.
|
def enableEditing(self, enabled):
    """Toggle the editing buttons and the model's editability (slot).

    Args:
        enabled (bool): whether editing shall be activated.
    """
    # NOTE(review): buttons[0] is intentionally skipped here — presumably
    # the edit-mode toggle itself; confirm against initUi().
    for button in self.buttons[1:]:
        button.setEnabled(enabled)
        if button.isChecked():
            button.setChecked(False)
    current_model = self.tableView.model()
    if current_model is not None:
        current_model.enableEditing(enabled)
| 373,444
|
Adds a column with the given parameters to the underlying model
This method is also a slot.
If no model is set, nothing happens.
Args:
columnName (str): The name of the new column.
dtype (numpy.dtype): The datatype of the new column.
defaultValue (object): Fill the column with this value.
|
def addColumn(self, columnName, dtype, defaultValue):
    """Add a column with the given parameters to the underlying model (slot).

    If no model is set, nothing happens.

    Args:
        columnName (str): name of the new column.
        dtype (numpy.dtype): datatype of the new column.
        defaultValue (object): fill the column with this value.
    """
    current_model = self.tableView.model()
    if current_model is not None:
        current_model.addDataFrameColumn(columnName, dtype, defaultValue)
    self.addColumnButton.setChecked(False)
| 373,446
|
Display the dialog to add a column to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the dialog will be created and shown.
|
def showAddColumnDialog(self, triggered):
    """Display the dialog to add a column to the model (slot).

    Args:
        triggered (bool): create and show the dialog only when True.
    """
    if not triggered:
        return
    dialog = AddAttributesDialog(self)
    dialog.accepted.connect(self.addColumn)
    dialog.rejected.connect(self.uncheckButton)
    dialog.show()
| 373,447
|
Adds a row to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the row will be appended to the end.
|
def addRow(self, triggered):
    """Append a row to the model when the button is activated (slot).

    Args:
        triggered (bool): append a row only when True.
    """
    if not triggered:
        return
    self.tableView.model().addDataFrameRows()
    self.sender().setChecked(False)
| 373,448
|
Removes a row to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the selected row will be removed
from the model.
|
def removeRow(self, triggered):
    """Remove the currently selected rows from the model (slot).

    Args:
        triggered (bool): remove the selection only when True.
    """
    if not triggered:
        return
    current_model = self.tableView.model()
    selected_rows = {index.row() for index in self.tableView.selectedIndexes()}
    current_model.removeDataFrameRows(selected_rows)
    self.sender().setChecked(False)
| 373,449
|
Removes one or multiple columns from the model.
This method is also a slot.
Args:
columnNames (list): A list of columns, which shall
be removed from the model.
|
def removeColumns(self, columnNames):
    """Remove one or multiple columns from the model (slot).

    Args:
        columnNames (list): columns which shall be removed from the model.
    """
    current_model = self.tableView.model()
    if current_model is not None:
        current_model.removeDataFrameColumns(columnNames)
    self.removeColumnButton.setChecked(False)
| 373,450
|
Display the dialog to remove column(s) from the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the dialog will be created and shown.
|
def showRemoveColumnDialog(self, triggered):
    """Display the dialog to remove column(s) from the model (slot).

    Args:
        triggered (bool): create and show the dialog only when True.
    """
    if not triggered:
        return
    current_model = self.tableView.model()
    if current_model is None:
        return
    columns = current_model.dataFrameColumns()
    dialog = RemoveAttributesDialog(columns, self)
    dialog.accepted.connect(self.removeColumns)
    dialog.rejected.connect(self.uncheckButton)
    dialog.show()
| 373,451
|
Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View.
|
def setViewModel(self, model):
    """Install a DataFrameModel into the enclosed table view.

    Args:
        model (DataFrameModel): model to be displayed by the table view;
            objects of any other type are ignored.
    """
    if not isinstance(model, DataFrameModel):
        return
    self.enableEditing(False)
    self.uncheckButton()
    # Hold a reference to the outgoing selection model until the new model
    # is installed, then drop it explicitly.
    old_selection_model = self.tableView.selectionModel()
    self.tableView.setModel(model)
    model.dtypeChanged.connect(self.updateDelegate)
    model.dataChanged.connect(self.updateDelegates)
    del old_selection_model
| 373,452
|
Create a new thread for given worker.
Args:
parent (QObject): parent of thread and worker.
worker (ProgressWorker): worker to use in thread.
deleteWorkerLater (bool, optional): delete the worker if thread finishes.
Returns:
QThread
|
def createThread(parent, worker, deleteWorkerLater=False):
    """Create a new QThread wired to run the given worker.

    Args:
        parent (QObject): parent of thread and worker.
        worker (ProgressWorker): worker to use in the thread.
        deleteWorkerLater (bool, optional): delete the worker when the
            thread finishes.

    Returns:
        QThread: the new (not yet started) thread.
    """
    worker_thread = QtCore.QThread(parent)
    worker_thread.started.connect(worker.doWork)
    worker.finished.connect(worker_thread.quit)
    if deleteWorkerLater:
        worker_thread.finished.connect(worker.deleteLater)
    worker.moveToThread(worker_thread)
    worker.setParent(parent)
    return worker_thread
| 373,454
|
Worker object that will be passed to the thread.
Args:
name (str): name shown in progress ui.
|
def __init__(self, name):
    """Create the worker object that will be passed to the thread.

    Args:
        name (str): name shown in the progress UI.
    """
    super(ProgressWorker, self).__init__()
    self.name = name
| 373,455
|
Constructs the object with the given parent.
Args:
parent (QtCore.QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
|
def __init__(self, parent=None):
    """Construct the translator and its dtype/description tables.

    Args:
        parent (QtCore.QObject, optional): Causes the objected to be owned
            by `parent` instead of Qt. Defaults to `None`.
    """
    super(SupportedDtypesTranslator, self).__init__(parent)
    # Plain str / dtype('S') columns are intentionally unsupported; text is
    # represented as dtype(object).
    self._strs = [(np.dtype(object), self.tr('text'))]
    self._ints = [
        (np.dtype(np.int8), self.tr('small integer (8 bit)')),
        (np.dtype(np.int16), self.tr('small integer (16 bit)')),
        (np.dtype(np.int32), self.tr('integer (32 bit)')),
        (np.dtype(np.int64), self.tr('integer (64 bit)')),
    ]
    self._uints = [
        (np.dtype(np.uint8), self.tr('unsigned small integer (8 bit)')),
        (np.dtype(np.uint16), self.tr('unsigned small integer (16 bit)')),
        (np.dtype(np.uint32), self.tr('unsigned integer (32 bit)')),
        (np.dtype(np.uint64), self.tr('unsigned integer (64 bit)')),
    ]
    self._floats = [
        (np.dtype(np.float16), self.tr('floating point number (16 bit)')),
        (np.dtype(np.float32), self.tr('floating point number (32 bit)')),
        (np.dtype(np.float64), self.tr('floating point number (64 bit)')),
    ]
    self._datetime = [(np.dtype('<M8[ns]'), self.tr('date and time'))]
    self._bools = [(np.dtype(bool), self.tr('true/false value'))]
    self._all = (self._strs + self._ints + self._uints + self._floats
                 + self._bools + self._datetime)
| 373,456
|
Gets the datatype for the given `value` (description).
Args:
value (str): A text description for any datatype.
Returns:
numpy.dtype: The matching datatype for the given text.
None: If no match can be found, `None` will be returned.
|
def dtype(self, value):
    """Look up the datatype for the given description text.

    Args:
        value (str): a text description for any datatype.

    Returns:
        numpy.dtype: the matching datatype, or None when nothing matches.
    """
    candidates = (d for d, description in self._all if description == value)
    return next(candidates, None)
| 373,458
|
Fill all NaN/NaT values of a column with an empty string
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Series with filled NaN values.
|
def fillNoneValues(column):
    """Fill all NaN/NaT values of an object-dtype column with empty strings.

    Args:
        column (pandas.Series): a Series object with all rows; modified in
            place when its dtype is object.

    Returns:
        pandas.Series: the column, with NaN replaced for object dtype.
    """
    if column.dtype != object:
        return column
    column.fillna('', inplace=True)
    return column
| 373,461
|
Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Converted to datetime if no errors occured, else the
original column will be returned.
|
def convertTimestamps(column):
    """Best-effort conversion of a column's dtype to datetime.

    Args:
        column (pandas.Series): a Series object with all rows.

    Returns:
        pandas.Series: converted column if no errors occurred, else the
        original column.
    """
    try:
        # Probe a single random row first: np.datetime64 fails fast on
        # non-date content, which is cheaper than converting the whole
        # column only to find out it cannot be parsed.
        probe_row = randint(0, len(column.index) - 1)
        np.datetime64(column[probe_row])
        return column.apply(to_datetime)
    except Exception:
        return column
| 373,462
|
construct a new instance of a BigIntSpinboxDelegate.
Args:
maximum (int or long, optional): maximum allowed number in BigIntSpinbox. defaults to 18446744073709551615.
minimum (int or long, optional): minimum allowed number in BigIntSpinbox. defaults to -18446744073709551616.
singleStep (int, optional): amount of steps to stepUp BigIntSpinbox. defaults to 1.
|
def __init__(self, minimum=-18446744073709551616, maximum=18446744073709551615, singleStep=1, parent=None):
    """Construct a new BigIntSpinboxDelegate.

    Args:
        minimum (int, optional): minimum allowed number; defaults to -2**64.
        maximum (int, optional): maximum allowed number; defaults to 2**64 - 1.
        singleStep (int, optional): step amount for the spinbox; defaults to 1.
        parent (QObject, optional): Qt parent; defaults to None.
    """
    super(BigIntSpinboxDelegate, self).__init__(parent)
    self.singleStep = singleStep
    self.minimum = minimum
    self.maximum = maximum
| 373,471
|
Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
|
def createEditor(self, parent, option, index):
    """Create a BigIntSpinbox editor configured with the delegate's limits.

    Args:
        parent (QWidget): parent widget.
        option (QStyleOptionViewItem): controls how the editor appears.
        index (QModelIndex): model data index.
    """
    editor = BigIntSpinbox(parent)
    try:
        editor.setMinimum(self.minimum)
        editor.setMaximum(self.maximum)
        editor.setSingleStep(self.singleStep)
    except TypeError:
        # Fall back to the editor's built-in default limits when the
        # configured values have the wrong type.
        pass
    return editor
| 373,472
|
Sets the data to be displayed and edited by the editor from the data model item specified by the model index.
Args:
spinBox (BigIntSpinbox): editor widget.
index (QModelIndex): model data index.
|
def setEditorData(self, spinBox, index):
    """Load the model value at index into the spinbox editor.

    Args:
        spinBox (BigIntSpinbox): editor widget.
        index (QModelIndex): model data index.
    """
    if not index.isValid():
        return
    spinBox.setValue(index.model().data(index, QtCore.Qt.EditRole))
| 373,473
|
construct a new instance of a CustomDoubleSpinboxDelegate.
Args:
maximum (float): maximum allowed number in QDoubleSpinBox.
minimum (float): minimum allowed number in QDoubleSpinBox.
singleStep (int, optional): amount of steps to stepUp QDoubleSpinBox. defaults to 0.1.
decimals (int, optional): decimals to use. defaults to 2.
|
def __init__(self, minimum, maximum, decimals=2, singleStep=0.1, parent=None):
    """Construct a new CustomDoubleSpinboxDelegate.

    Args:
        minimum (float): minimum allowed number in the QDoubleSpinBox.
        maximum (float): maximum allowed number in the QDoubleSpinBox.
        decimals (int, optional): decimals to use; defaults to 2.
        singleStep (float, optional): step amount; defaults to 0.1.
        parent (QObject, optional): Qt parent; defaults to None.
    """
    super(CustomDoubleSpinboxDelegate, self).__init__(parent)
    self.singleStep = singleStep
    self.decimals = decimals
    self.minimum = minimum
    self.maximum = maximum
| 373,474
|
Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
|
def createEditor(self, parent, option, index):
    """Create a QDoubleSpinBox editor configured with the delegate's limits.

    Args:
        parent (QWidget): parent widget.
        option (QStyleOptionViewItem): controls how the editor appears.
        index (QModelIndex): model data index.
    """
    editor = QtGui.QDoubleSpinBox(parent)
    try:
        editor.setMinimum(self.minimum)
        editor.setMaximum(self.maximum)
        editor.setSingleStep(self.singleStep)
        editor.setDecimals(self.decimals)
    except TypeError:
        # Fall back to the spinbox's built-in defaults when the configured
        # values have the wrong type.
        pass
    return editor
| 373,475
|
Gets data from the editor widget and stores it in the specified model at the item index.
Args:
spinBox (QDoubleSpinBox): editor widget.
model (QAbstractItemModel): parent model.
index (QModelIndex): model data index.
|
def setModelData(self, spinBox, model, index):
    """Commit the spinbox value into the model at the given index.

    Args:
        spinBox (QDoubleSpinBox): editor widget.
        model (QAbstractItemModel): parent model.
        index (QModelIndex): model data index.
    """
    spinBox.interpretText()
    model.setData(index, spinBox.value(), QtCore.Qt.EditRole)
| 373,476
|
Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.
Args:
parent (QWidget): parent widget.
option (QStyleOptionViewItem): controls how editor widget appears.
index (QModelIndex): model data index.
|
def createEditor(self, parent, option, index):
    """Create a QLineEdit editor for the item at index.

    Args:
        parent (QWidget): parent widget.
        option (QStyleOptionViewItem): controls how the editor appears.
        index (QModelIndex): model data index.
    """
    return QtGui.QLineEdit(parent)
| 373,477
|
Gets data from the editor widget and stores it in the specified model at the item index.
Args:
editor (QtGui.QLineEdit): editor widget.
model (QAbstractItemModel): parent model.
index (QModelIndex): model data index.
|
def setModelData(self, editor, model, index):
    """Store the line edit's text into the model at the given index.

    Args:
        editor (QtGui.QLineEdit): editor widget.
        model (QAbstractItemModel): parent model.
        index (QModelIndex): model data index.
    """
    if index.isValid():
        model.setData(index, editor.text(), QtCore.Qt.EditRole)
| 373,478
|
Updates the model after changing data in the editor.
Args:
editor (QtGui.QComboBox): The current editor for the item. Should be
a `QtGui.QComboBox` as defined in `createEditor`.
model (ColumnDtypeModel): The model which holds the displayed data.
index (QtCore.QModelIndex): The index of the current item of the model.
|
def setModelData(self, editor, model, index):
    """Write the combo box's currently selected text into the model.

    Args:
        editor (QtGui.QComboBox): the current editor, as created by
            `createEditor`.
        model (ColumnDtypeModel): the model which holds the displayed data.
        index (QtCore.QModelIndex): the index of the current model item.
    """
    selected_text = editor.itemText(editor.currentIndex())
    model.setData(index, selected_text)
| 373,481
|
create a new MimeData object.
Args:
mimeType (str): the mime type.
|
def __init__(self, mimeType=PandasCellMimeType):
    """Create a new MimeData object.

    Args:
        mimeType (str): the mime type used when attaching data.
    """
    super(MimeData, self).__init__()
    self._mimeType = mimeType
| 373,482
|
Add some data.
Args:
data (object): Object to add as data. This object has to be pickable.
Qt objects don't work!
Raises:
TypeError if data is not pickable
|
def setData(self, data):
    """Pickle data and attach it under this object's mime type.

    Args:
        data (object): object to add as data; it has to be picklable
            (Qt objects don't work).

    Raises:
        TypeError: if data is not picklable.
    """
    try:
        bytestream = pickle.dumps(data)
        super(MimeData, self).setData(self._mimeType, bytestream)
    except TypeError:
        raise TypeError(self.tr("can not pickle added data"))
    # The original trailing `except: raise` clause was a no-op (a bare
    # re-raise of anything else) and has been removed.
| 373,483
|
store dataframe information in a pickable object
Args:
dfindex (pandas.index): index of the dragged data.
column (str): name of column to be dragged.
value (object): value on according position.
dtype (pandas dtype): data type of column.
parentId (str): hex(id(...)) of according DataFrameModel.
|
def __init__(self, dfindex, column, value, dtype, parentId):
    """Store dataframe cell information in a picklable object.

    Args:
        dfindex (pandas.Index): index of the dragged data.
        column (str): name of the dragged column.
        value (object): value at the corresponding position.
        dtype (pandas dtype): data type of the column.
        parentId (str): hex(id(...)) of the originating DataFrameModel.
    """
    super(PandasCellPayload, self).__init__()
    self.parentId = parentId
    self.dtype = dtype
    self.value = value
    self.column = column
    self.dfindex = dfindex
| 373,485
|
the __init__ method.
Args:
parent (QObject): defaults to None. If parent is 0, the new widget becomes a window.
If parent is another widget, this widget becomes a child window inside parent.
The new widget is deleted when its parent is deleted.
|
def __init__(self, parent=None):
    """The __init__ method.

    Args:
        parent (QObject): defaults to None. If parent is 0, the new widget
            becomes a window. If parent is another widget, this widget
            becomes a child window inside parent. The new widget is deleted
            when its parent is deleted.
    """
    super(BigIntSpinbox, self).__init__(parent)
    self._singleStep = 1
    # 64-bit range: -2**64 .. 2**64 - 1.
    self._minimum = -18446744073709551616
    self._maximum = 18446744073709551615
    # NOTE(review): this validator accepts digit sequences only, so negative
    # values cannot be typed even though _minimum is negative — confirm
    # whether that is intended.
    rx = QtCore.QRegExp("[0-9]\\d{0,20}")
    validator = QtGui.QRegExpValidator(rx, self)
    self._lineEdit = QtGui.QLineEdit(self)
    self._lineEdit.setText('0')
    self._lineEdit.setValidator(validator)
    self.setLineEdit(self._lineEdit)
| 373,498
|
setter function to _lineEdit.text. Sets minimum/maximum as new value if value is out of bounds.
Args:
value (int/long): new value to set.
Returns
True if all went fine.
|
def setValue(self, value):
    """Write value into the line edit, clamped to [minimum(), maximum()].

    Args:
        value (int): new value to set.

    Returns:
        bool: always True.
    """
    clamped = value
    if value < self.minimum():
        clamped = self.minimum()
    elif value > self.maximum():
        clamped = self.maximum()
    self._lineEdit.setText(str(clamped))
    return True
| 373,499
|
steps value up/down by a single step. Single step is defined in singleStep().
Args:
steps (int): positiv int steps up, negativ steps down
|
def stepBy(self, steps):
    """Step the value up or down by steps * singleStep().

    Args:
        steps (int): positive steps up, negative steps down.
    """
    delta = steps * self.singleStep()
    self.setValue(self.value() + delta)
| 373,500
|
setter to _singleStep. converts negativ values to positiv ones.
Args:
singleStep (int): new _singleStep value. converts negativ values to positiv ones.
Raises:
TypeError: If the given argument is not an integer.
Returns:
int or long: the absolute value of the given argument.
|
def setSingleStep(self, singleStep):
    """Set the step size; negative values are stored as absolute values.

    Args:
        singleStep (int): new step size.

    Raises:
        TypeError: if the given argument is not an integer.

    Returns:
        int: the stored (absolute) step size.
    """
    if not isinstance(singleStep, int):
        raise TypeError("Argument is not of type int")
    # Negative step sizes make no sense; store the magnitude.
    self._singleStep = abs(singleStep)
    return self._singleStep
| 373,502
|
setter to _minimum.
Args:
minimum (int or long): new _minimum value.
Raises:
TypeError: If the given argument is not an integer.
|
def setMinimum(self, minimum):
    """Set the lower bound.

    Args:
        minimum (int): new minimum value.

    Raises:
        TypeError: if the given argument is not an integer.
    """
    if isinstance(minimum, int):
        self._minimum = minimum
    else:
        raise TypeError("Argument is not of type int or long")
| 373,503
|
setter to _maximum.
Args:
maximum (int or long): new _maximum value
|
def setMaximum(self, maximum):
    """Set the upper bound.

    Args:
        maximum (int): new maximum value.

    Raises:
        TypeError: if the given argument is not an integer.
    """
    if isinstance(maximum, int):
        self._maximum = maximum
    else:
        raise TypeError("Argument is not of type int or long")
| 373,504
|
the __init__ method.
Args:
dataFrame (pandas.core.frame.DataFrame, optional): initializes the model with given DataFrame.
If none is given an empty DataFrame will be set. defaults to None.
editable (bool, optional): apply changes while changing dtype. defaults to False.
|
def __init__(self, dataFrame=None, editable=False):
    """Construct the model, optionally populating it with a DataFrame.

    Args:
        dataFrame (pandas.core.frame.DataFrame, optional): initial data;
            if None, an empty DataFrame is used.
        editable (bool, optional): whether dtype changes are applied
            immediately. Defaults to False.
    """
    super(ColumnDtypeModel, self).__init__()
    # fixed labels for the two columns shown by this model
    self.headers = ['column', 'data type']
    self._editable = editable
    self._dataFrame = pandas.DataFrame()
    if dataFrame is not None:
        # route through the setter so layout-change signals are emitted
        self.setDataFrame(dataFrame)
| 373,518
|
setter function to _dataFrame. Holds all data.
Note:
It's not implemented with python properties to keep Qt conventions.
Raises:
TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.
Args:
dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
|
def setDataFrame(self, dataFrame):
    """Replace the model's DataFrame, notifying attached views.

    Args:
        dataFrame (pandas.core.frame.DataFrame): the data to display.

    Raises:
        TypeError: if *dataFrame* is not a pandas DataFrame.
    """
    if isinstance(dataFrame, pandas.core.frame.DataFrame):
        # bracket the swap with layout signals so views refresh correctly
        self.layoutAboutToBeChanged.emit()
        self._dataFrame = dataFrame
        self.layoutChanged.emit()
    else:
        raise TypeError('Argument is not of type pandas.core.frame.DataFrame')
| 373,519
|
setter to _editable. apply changes while changing dtype.
Raises:
TypeError: if editable is not of type bool.
Args:
editable (bool): apply changes while changing dtype.
|
def setEditable(self, editable):
    """Enable or disable applying dtype changes as they are made.

    Args:
        editable (bool): whether edits take effect immediately.

    Raises:
        TypeError: if *editable* is not a bool.
    """
    if isinstance(editable, bool):
        self._editable = editable
    else:
        raise TypeError('Argument is not of type bool')
| 373,520
|
Returns the item flags for the given index as ored value, e.x.: Qt.ItemIsUserCheckable | Qt.ItemIsEditable
Args:
index (QtCore.QModelIndex): Index to define column and row
Returns:
for column 'column': Qt.ItemIsSelectable | Qt.ItemIsEnabled
for column 'data type': Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
|
def flags(self, index):
    """Return the item flags for *index*.

    Column 0 ('column') is selectable and enabled only; any other column
    ('data type') is additionally editable when the model is editable.

    Args:
        index (QtCore.QModelIndex): index identifying row and column.

    Returns:
        Qt.ItemFlags: the OR-ed flag value for the cell.
    """
    if not index.isValid():
        return Qt.NoItemFlags
    base = Qt.ItemIsEnabled | Qt.ItemIsSelectable
    if index.column() > 0 and self.editable():
        return base | Qt.ItemIsEditable
    return base
| 373,523
|
Constructs a `DataSearch` object from the given attributes.
Args:
name (str): The name of the filter.
filterString (str, optional): A python expression as string.
Defaults to an empty string.
dataFrame (pandas.DataFrame, optional): The object to filter.
Defaults to an empty `DataFrame`.
|
def __init__(self, name, filterString='', dataFrame=None):
    """Construct a `DataSearch` object.

    Args:
        name (str): the name of the filter.
        filterString (str, optional): a python expression as string.
            Defaults to an empty string.
        dataFrame (pandas.DataFrame, optional): the object to filter.
            Defaults to a fresh empty `DataFrame`.
    """
    self._filterString = filterString
    # bug fix: the original default `dataFrame=pd.DataFrame()` is a mutable
    # default argument shared by every instance; build a fresh one per call
    self._dataFrame = pd.DataFrame() if dataFrame is None else dataFrame
    self.name = name
| 373,524
|
Filters the data by a list of indexes.
Args:
indexes (list of int): List of index numbers to return.
Returns:
list: A list containing all indexes with filtered data. Matches
will be `True`, the remaining items will be `False`. If the
dataFrame is empty, an empty list will be returned.
|
def indexSearch(self, indexes):
    """Filter the data by a list of index values.

    Args:
        indexes (list of int): index values to match.

    Returns:
        numpy boolean array marking matching rows (True for matches),
        or an empty list if the DataFrame is empty.
    """
    if self._dataFrame.empty:
        return []
    # one vectorised membership test instead of OR-ing a mask per index;
    # returns the same boolean ndarray as the original loop
    return self._dataFrame.index.isin(indexes)
| 373,529
|
Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime
Raises:
AssertionError: if timestampFormat is not of type unicode.
Args:
timestampFormat (unicode): assign timestampFormat to _timestampFormat.
Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.
|
def timestampFormat(self, timestampFormat):
    """Set the format string used to convert timestamps to QDateTime.

    Args:
        timestampFormat (str): formatting string used by the data method.

    Raises:
        TypeError: if *timestampFormat* is not a str.
    """
    if isinstance(timestampFormat, str):
        self._timestampFormat = timestampFormat
    else:
        raise TypeError('not of type unicode')
| 373,535
|
Apply a filter and hide rows.
The filter must be a `DataSearch` object, which evaluates a python
expression.
If there was an error while parsing the expression, the data will remain
unfiltered.
Args:
search(qtpandas.DataSearch): data search object to use.
Raises:
TypeError: An error is raised, if the given parameter is not a
`DataSearch` object.
|
def setFilter(self, search):
    """Apply a filter and hide non-matching rows.

    The filter must be a `DataSearch` object, which evaluates a python
    expression. If the expression fails to parse, the data remains
    unfiltered.

    Args:
        search (DataSearch): data search object to use.

    Raises:
        TypeError: if *search* is not a `DataSearch` object.
    """
    if not isinstance(search, DataSearch):
        raise TypeError('The given parameter must an `qtpandas.DataSearch` object')
    self._search = search
    self.layoutAboutToBeChanged.emit()
    # restore the unfiltered frame first so consecutive filters don't stack
    if self._dataFrameOriginal is not None:
        self._dataFrame = self._dataFrameOriginal
    # keep a copy of the unfiltered data so clearFilter() can restore it
    self._dataFrameOriginal = self._dataFrame.copy()
    self._search.setDataFrame(self._dataFrame)
    searchIndex, valid = self._search.search()
    if valid:
        self._dataFrame = self._dataFrame[searchIndex]
        self.layoutChanged.emit()
    else:
        # invalid expression: drop the filter entirely
        self.clearFilter()
        self.layoutChanged.emit()
    self.dataFrameChanged.emit()
| 373,543
|
Gets the first key of all available encodings where the corresponding
value matches the comparator.
Args:
comparator (string): A view name for an encoding.
Returns:
str: A key for a specific encoding used by python.
|
def _calculateEncodingKey(comparator):
    """Return the first encoding key whose view name equals *comparator*.

    Args:
        comparator (str): a view name for an encoding.

    Returns:
        str or None: the matching python encoding key, if any.
    """
    return next(
        (key for key, view in _encodings.items() if view == comparator),
        None,
    )
| 373,583
|
Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
|
def __init__(self, parent=None):
    """Construct a validator accepting one non-whitespace character.

    Args:
        parent (QObject, optional): Causes the object to be owned
            by `parent` instead of Qt. Defaults to `None`.
    """
    super(DelimiterValidator, self).__init__(parent)
    # raw string fixes the invalid '\S' escape-sequence warning while
    # producing a byte-identical pattern: one non-whitespace character
    re = QtCore.QRegExp(r'\S{1}')
    self.setRegExp(re)
| 373,584
|
Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
|
def __init__(self, parent=None):
    """Construct the delimiter selection widget and build its UI.

    Args:
        parent (QObject, optional): Causes the object to be owned
            by `parent` instead of Qt. Defaults to `None`.
    """
    super(DelimiterSelectionWidget, self).__init__(parent)
    # the widgets are created later inside _initUI()
    self.semicolonRadioButton = None
    self.commaRadioButton = None
    self.tabRadioButton = None
    self.otherRadioButton = None
    self.otherSeparatorLineEdit = None
    self._initUI()
| 373,585
|
Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
|
def __init__(self, parent=None):
    """Construct the CSV import dialog and build its UI.

    Args:
        parent (QObject, optional): Causes the object to be owned
            by `parent` instead of Qt. Defaults to `None`.
    """
    super(CSVImportDialog, self).__init__(parent)
    self._modal = True
    self._windowTitle = 'Import CSV'
    # import state, filled in as the user interacts with the dialog
    self._encodingKey = None
    self._filename = None
    self._delimiter = None
    self._header = None
    # self._detector = Detector()
    self._initUI()
| 373,589
|
Opens a file from the given `path` and checks the file encoding.
The file must exists on the file system and end with the extension
`.csv`. The file is read line by line until the encoding could be
guessed.
On a successful identification, the widgets of this dialog will be
updated.
Args:
path (string): Path to a csv file on the file system.
|
def _guessEncoding(self, path):
    """Detect the encoding of the csv file at *path* and update the
    encoding combo box accordingly.

    The file must exist on the file system and end with `.csv`.

    NOTE(review): the detector call is commented out, so `encoding` is
    always None here and the update branch below is currently dead code
    until a detector is restored — confirm this is intentional.

    Args:
        path (string): Path to a csv file on the file system.
    """
    if os.path.exists(path) and path.lower().endswith('csv'):
        # encoding = self._detector.detect(path)
        encoding = None
        if encoding is not None:
            # normalize e.g. 'utf-8' -> 'utf8' and dashes to underscores
            # to match the keys used by the `_encodings` mapping
            if encoding.startswith('utf'):
                encoding = encoding.replace('-', '')
            encoding = encoding.replace('-','_')
            viewValue = _encodings.get(encoding)
            self._encodingKey = encoding
            index = self._encodingComboBox.findText(viewValue.upper())
            self._encodingComboBox.setCurrentIndex(index)
| 373,593
|
Changes the value of the encoding combo box to the value of given index.
This method is also a `SLOT`.
After the encoding is changed, the file will be reloaded and previewed.
Args:
index (int): An valid index of the combo box.
|
def _updateEncoding(self, index):
    """SLOT: switch to the encoding at *index* and re-preview the file.

    Args:
        index (int): a valid index of the encoding combo box.
    """
    selected = self._encodingComboBox.itemText(index).lower()
    self._encodingKey = _calculateEncodingKey(selected)
    self._previewFile()
| 373,594
|
Message constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- optional message buffer to decode
Optional keyword arguments correspond to members to set (matching
fields in self.__fields__, or 'data').
|
def __init__(self, *args, **kwargs):
    """Message constructor with ([buf], [field=val, ...]) prototype.

    Args:
        buf: optional message buffer to decode (single positional arg).

    Optional keyword arguments correspond to members to set (matching
    fields in self.__fields__, or 'data').
    """
    # create message fields
    if hasattr(self, '__fields__'):
        self._create_fields()
    # set default lun
    self.lun = self.__default_lun__
    self.data = ''
    if args:
        # a positional buffer takes precedence: decode it into the fields
        self._decode(args[0])
    else:
        for (name, value) in kwargs.items():
            self._set_field(name, value)
| 374,168
|
Create an empty table with column labels.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles
letter | count | points
Args:
``labels`` (list of strings): The column labels.
``formatter`` (Formatter): An instance of :class:`Formatter` that
formats the columns' values.
|
def __init__(self, labels=None, _deprecated=None, *, formatter=_formats.default_formatter):
    """Create an empty table with column labels.

    >>> tiles = Table(make_array('letter', 'count', 'points'))
    >>> tiles
    letter | count | points

    Args:
        ``labels`` (list of strings): The column labels.
        ``_deprecated``: legacy positional columns argument supporting the
            deprecated two-argument form ``Table(columns, labels)``.
        ``formatter`` (Formatter): An instance of :class:`Formatter` that
            formats the columns' values.
    """
    self._columns = collections.OrderedDict()
    self._formats = dict()
    self.formatter = formatter
    if _deprecated is not None:
        warnings.warn("Two-argument __init__ is deprecated. Use Table().with_columns(...)", FutureWarning)
        columns, labels = labels, _deprecated
        columns = columns if columns is not None else []
        labels = labels if labels is not None else []
        assert len(labels) == len(columns), 'label/column number mismatch'
    else:
        labels = labels if labels is not None else []
        columns = [[] for _ in labels]
    # bug fix: compare with ==, not identity (`is`), which is an unreliable
    # (and now syntax-warned) way to compare integer values
    self._num_rows = 0 if len(columns) == 0 else len(columns[0])
    # Add each column to table
    for column, label in zip(columns, labels):
        self[label] = column
    self.take = _RowTaker(self)
    self.exclude = _RowExcluder(self)
| 375,530
|
Creates an empty table. Column labels are optional. [Deprecated]
Args:
``labels`` (None or list): If ``None``, a table with 0
columns is created.
If a list, each element is a column label in a table with
0 rows.
Returns:
A new instance of ``Table``.
|
def empty(cls, labels=None):
    """Create an empty table. Column labels are optional. [Deprecated]

    Args:
        ``labels`` (None or list): If ``None``, a table with 0
            columns is created. If a list, each element is a column
            label in a table with 0 rows.

    Returns:
        A new instance of ``Table``.
    """
    warnings.warn("Table.empty(labels) is deprecated. Use Table(labels)", FutureWarning)
    if labels is None:
        return cls()
    return cls([[] for _ in labels], labels)
| 375,531
|
Return a table with an additional row.
Args:
``row`` (sequence): A value for each column.
Raises:
``ValueError``: If the row length differs from the column count.
>>> tiles = Table(make_array('letter', 'count', 'points'))
>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
letter | count | points
c | 2 | 3
d | 4 | 2
|
def with_row(self, row):
    """Return a copy of this table with *row* appended.

    Args:
        ``row`` (sequence): A value for each column.

    Raises:
        ``ValueError``: If the row length differs from the column count.
    """
    extended = self.copy()
    extended.append(row)
    return extended
| 375,570
|
Plots a normal curve with specified parameters and area below curve shaded
between ``lbound`` and ``rbound``.
Args:
``rbound`` (numeric): right boundary of shaded region
``lbound`` (numeric): left boundary of shaded region; by default is negative infinity
``mean`` (numeric): mean/expectation of normal distribution
``sd`` (numeric): standard deviation of normal distribution
|
def plot_normal_cdf(rbound=None, lbound=None, mean=0, sd=1):
    """Plot a normal curve with the area under it shaded between
    ``lbound`` and ``rbound``.

    Args:
        rbound (numeric): right boundary of shaded region (None = +inf).
        lbound (numeric): left boundary of shaded region (None = -inf).
        mean (numeric): mean/expectation of the normal distribution.
        sd (numeric): standard deviation of the normal distribution.
    """
    shade = rbound is not None or lbound is not None
    shade_left = rbound is not None and lbound is not None
    # 3.5 standard deviations covers essentially all of the density
    inf = 3.5 * sd
    step = 0.1
    rlabel = rbound
    llabel = lbound
    # raw strings fix the invalid '\i', '\p', '\m', '\s' escape sequences
    # while keeping the rendered LaTeX byte-identical
    if rbound is None:
        rbound = inf + mean
        rlabel = r"$\infty$"
    if lbound is None:
        lbound = -inf + mean
        llabel = r"-$\infty$"
    pdf_range = np.arange(-inf + mean, inf + mean, step)
    plt.plot(pdf_range, stats.norm.pdf(pdf_range, loc=mean, scale=sd), color='k', lw=1)
    cdf_range = np.arange(lbound, rbound + step, step)
    if shade:
        plt.fill_between(cdf_range, stats.norm.pdf(cdf_range, loc=mean, scale=sd), color='gold')
    if shade_left:
        # two-sided region: shade the left tail in a contrasting color
        cdf_range = np.arange(-inf+mean, lbound + step, step)
        plt.fill_between(cdf_range, stats.norm.pdf(cdf_range, loc=mean, scale=sd), color='darkblue')
    plt.ylim(0, stats.norm.pdf(0, loc=0, scale=sd) * 1.25)
    plt.xlabel('z')
    plt.ylabel(r'$\phi$(z)', rotation=90)
    plt.title(r"Normal Curve ~ ($\mu$ = {0}, $\sigma$ = {1}) "
              "{2} < z < {3}".format(mean, sd, llabel, rlabel), fontsize=16)
    plt.show()
| 375,597
|
Get individual ISO transfer's setup.
Returns a list of dicts, each containing an individual ISO transfer
parameters:
- length
- actual_length
- status
(see libusb1's API documentation for their signification)
Returned list is consistent with getISOBufferList return value.
Should not be called on a submitted transfer (except for 'length'
values).
|
def getISOSetupList(self):
    """Get individual ISO transfer's setup.

    Returns a list of dicts, each containing an individual ISO transfer's
    parameters: length, actual_length, status (see libusb1's API
    documentation for their signification). The returned list is
    consistent with the getISOBufferList return value. Should not be
    called on a submitted transfer (except for 'length' values).

    Raises:
        TypeError: if this transfer is not isochronous.
    """
    transfer_p = self.__transfer
    transfer = transfer_p.contents
    # pylint: disable=undefined-variable
    if transfer.type != TRANSFER_TYPE_ISOCHRONOUS:
        # pylint: enable=undefined-variable
        raise TypeError(
            'This method cannot be called on non-iso transfers.'
        )
    # one dict per ISO packet descriptor in the underlying transfer
    return [
        {
            'length': x.length,
            'actual_length': x.actual_length,
            'status': x.status,
        }
        for x in libusb1.get_iso_packet_list(transfer_p)
    ]
| 375,764
|
TODO: Implement trie lookup with edit distance
Args:
iterable(list?): key used to find what is requested this could
be a generator.
index(int): index of what is requested
gather(bool): of weather to gather or not
edit_distance(int): the distance -- currently not used
max_edit_distance(int): the max distance -- not currently used
yields:
object: yields the results of the search
|
def lookup(self, iterable, index=0, gather=False, edit_distance=0, max_edit_distance=0, match_threshold=0.0, matched_length=0):
    """Fuzzy trie lookup with optional edit distance.

    Args:
        iterable: key (or generator) to search for.
        index (int): current position within *iterable*.
        gather (bool): whether to also yield matches at word breaks.
        edit_distance (int): edits consumed so far on this path.
        max_edit_distance (int): maximum edits allowed.
        match_threshold (float): minimum confidence to yield a match.
        matched_length (int): number of characters matched so far.

    Yields:
        dict: match records with 'key', 'match', 'data' and 'confidence'.
    """
    if self.is_terminal:
        if index == len(iterable) or \
                (gather and index < len(iterable) and iterable[index] == ' '):  # only gather on word break)
            # confidence shrinks with edits consumed, relative to key length
            confidence = float(len(self.key) - edit_distance) / float(max(len(self.key), index))
            if confidence > match_threshold:
                yield {
                    'key': self.key,
                    'match': iterable[:index],
                    'data': self.data,
                    'confidence': confidence * self.weight
                }
    # exact-match descent along the next character
    if index < len(iterable) and iterable[index] in self.children:
        for result in self.children[iterable[index]]\
                .lookup(iterable, index + 1, gather=gather,
                        edit_distance=edit_distance, max_edit_distance=max_edit_distance, matched_length=matched_length + 1):
            yield result
    # if there's edit distance remaining and it's possible to match a word above the confidence threshold
    potential_confidence = float(index - edit_distance + (max_edit_distance - edit_distance)) / \
        (float(index) + (max_edit_distance - edit_distance)) if index + max_edit_distance - edit_distance > 0 else 0.0
    if edit_distance < max_edit_distance and potential_confidence > match_threshold:
        for child in list(self.children):
            if index >= len(iterable) or child != iterable[index]:
                # substitution
                for result in self.children[child]\
                        .lookup(iterable, index + 1, gather=gather,
                                edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):
                    yield result
                # delete
                for result in self.children[child]\
                        .lookup(iterable, index + 2, gather=gather,
                                edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):
                    yield result
                # insert
                for result in self.children[child]\
                        .lookup(iterable, index, gather=gather,
                                edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):
                    yield result
| 376,068
|
Insert new node into tree
Args:
iterable(hashable): key used to find in the future.
data(object): data associated with the key
index(int): an index used for insertion.
weight(float): the weight given to the item added.
|
def insert(self, iterable, index=0, data=None, weight=1.0):
    """Insert a key (and optional payload) into the trie.

    Args:
        iterable (hashable): key used to find the entry in the future.
        data (object): data associated with the key.
        index (int): position within the key for this recursion step.
        weight (float): the weight given to the item added.
    """
    if index == len(iterable):
        self.is_terminal = True
        self.key = iterable
        self.weight = weight
        if data:
            self.data.add(data)
    else:
        if iterable[index] not in self.children:
            self.children[iterable[index]] = TrieNode()
        # bug fix: propagate the weight down the recursion; previously it
        # was dropped, so every terminal node got the default weight 1.0
        self.children[iterable[index]].insert(iterable, index + 1, data, weight)
| 376,069
|
Init the Trie object and create root node.
Creates an Trie object with a root node with the passed in
max_edit_distance and match_threshold.
Args:
max_edit_distance(int): ?
match_threshold(int): ?
Notes:
This never seems to get called with max_edit_distance or match_threshold
|
def __init__(self, max_edit_distance=0, match_threshold=0.0):
    """Create a Trie with an empty root node.

    Args:
        max_edit_distance (int): maximum edit distance allowed in lookups.
        match_threshold (float): minimum confidence for a lookup match.
    """
    # 'root' is presumably a sentinel key for the root node — TODO confirm
    # against TrieNode's constructor signature
    self.root = TrieNode('root')
    self.max_edit_distance = max_edit_distance
    self.match_threshold = match_threshold
| 376,072
|
Used to remove from the root node
Args:
iterable(hashable): index or key used to identify
item to remove
data: data to be paired with the key
|
def remove(self, iterable, data=None):
    """Remove an entry, starting the search from the root node.

    Args:
        iterable (hashable): key identifying the item to remove.
        data: data paired with the key, when removing one payload only.
    """
    return self.root.remove(iterable, data=data)
| 376,076
|
This is used to find cliques and remove them from the graph
Args:
graph (graph): this is the graph of vertices to search for
cliques
p (list): this is a list of the vertices to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and verticies
|
def bronk(r, p, x, graph):
    """Bron-Kerbosch recursion: yield maximal cliques of *graph*.

    Args:
        graph (graph): graph of vertices, providing get_neighbors_of().
        r (list): clique built so far.
        p (list): candidate vertices that may still extend r.
        x (list): vertices already excluded from extending r.

    Yields:
        list: a maximal clique of the given graph.
    """
    if len(p) == 0 and len(x) == 0:
        # no candidates and nothing excluded: r is a maximal clique
        yield r
        return
    # iterate over a copy, since p is mutated inside the loop
    for vertex in p[:]:
        r_new = r[::]
        r_new.append(vertex)
        p_new = [val for val in p if val in graph.get_neighbors_of(vertex)] # p intersects N(vertex)
        x_new = [val for val in x if val in graph.get_neighbors_of(vertex)] # x intersects N(vertex)
        for result in bronk(r_new, p_new, x_new, graph):
            yield result
        # move vertex from candidates to excluded for later iterations
        p.remove(vertex)
        x.append(vertex)
| 376,077
|
Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity.
|
def graph_key_from_tag(tag, entity_index):
    """Build the graph key for one tagged entity.

    Args:
        tag (tag): the tag the entity belongs to.
        entity_index (int): index of the entity within the tag.

    Returns:
        str: "<start_token>-<entity key>-<entity confidence>".
    """
    start = tag.get('start_token')
    entity = tag.get('entities', [])[entity_index]
    return '-'.join([str(start), entity.get('key'), str(entity.get('confidence'))])
| 376,078
|
Used to add edges to the graph. 'a' and 'b' are vertices and
if 'a' or 'b' doesn't exist then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertex of the edge
|
def add_edge(self, a, b):
    """Add an undirected edge between vertices *a* and *b*; either
    vertex's adjacency set is created on demand.

    Args:
        a (hash): one vertex of the edge.
        b (hash): the other vertex of the edge.
    """
    # setdefault creates the adjacency set the first time a vertex is
    # seen — idiomatic replacement for the get/check/assign dance
    self.adjacency_lists.setdefault(a, set()).add(b)
    self.adjacency_lists.setdefault(b, set()).add(a)
| 376,079
|
Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice
|
def append(self, data):
    """Append *data* to the lattice.

    Non-empty lists are appended as-is; anything else (including an
    empty list) is wrapped in a single-item list.

    Args:
        data (item, list): the item or list to add.
    """
    is_nonempty_list = isinstance(data, list) and len(data) > 0
    self.nodes.append(data if is_nonempty_list else [data])
| 376,080
|
This is used to produce a list of lists where each item
in that list is a different combination of items from the lists
within, with every combination of such values.
Args:
index (int) : the index at which to start the list.
Note this is used only in the function as a processing
Returns:
list : is every combination.
|
def traverse(self, index=0):
    """Yield every combination made of one entity from each node list.

    Args:
        index (int): node position to start from; used internally by
            the recursion.

    Yields:
        list: one combination of entities, one taken from each node.
    """
    if index < len(self.nodes):
        for entity in self.nodes[index]:
            for next_result in self.traverse(index=index+1):
                # list entities are flattened into the combination
                if isinstance(entity, list):
                    yield entity + next_result
                else:
                    yield [entity] + next_result
    else:
        # past the last node: the empty tail terminates the recursion
        yield []
| 376,081
|
Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
|
def _build_graph(self, tags):
    """Build a graph whose vertices are tagged entities and whose edges
    connect entities from non-overlapping tags. Used internally.

    Args:
        tags (list): tags whose entities are added to the graph.

    Returns:
        SimpleGraph: the resulting graph of the tagged entities.
    """
    graph = SimpleGraph()
    # NOTE(review): xrange implies Python 2 or a compat shim — confirm
    for tag_index in xrange(len(tags)):
        for entity_index in xrange(len(tags[tag_index].get('entities'))):
            a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
            tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
            for tag in tags[tag_index + 1:]:
                start_token = tag.get('start_token')
                # only connect tags that start after this entity's tokens end
                if start_token >= tags[tag_index].get('start_token') + len(tokens):
                    for b_entity_index in xrange(len(tag.get('entities'))):
                        b_entity_name = graph_key_from_tag(tag, b_entity_index)
                        graph.add_edge(a_entity_name, b_entity_name)
    return graph
| 376,082
|
This called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
list : list of sorted tags by start_token this is a clique
|
def _sub_expand(self, tags):
    """Find cliques of compatible entities among *tags*; called by expand.

    Args:
        tags (list): tags used to build the entity graph.

    Yields:
        list: one clique of single-entity tags, sorted by start_token.
    """
    entities = {}
    graph = self._build_graph(tags)
    # name entities
    for tag in tags:
        for entity_index in xrange(len(tag.get('entities'))):
            node_name = graph_key_from_tag(tag, entity_index)
            if not node_name in entities:
                entities[node_name] = []
            # store (entity, its confidence, owning tag) per graph node
            entities[node_name] += [
                tag.get('entities', [])[entity_index],
                tag.get('entities', [])[entity_index].get('confidence'),
                tag
            ]
    for clique in get_cliques(list(entities), graph):
        result = []
        for entity_name in clique:
            # graph keys are "<start_token>-<key>-<confidence>"
            start_token = int(entity_name.split("-")[0])
            old_tag = entities[entity_name][2]
            # rebuild a single-entity tag carrying the combined confidence
            tag = {
                'start_token': start_token,
                'entities': [entities.get(entity_name)[0]],
                'confidence': entities.get(entity_name)[1] * old_tag.get('confidence', 1.0),
                'end_token': old_tag.get('end_token'),
                'match': old_tag.get('entities')[0].get('match'),
                'key': old_tag.get('entities')[0].get('key'),
                'from_context': old_tag.get('from_context', False)
            }
            result.append(tag)
        result = sorted(result, key=lambda e: e.get('start_token'))
        yield result
| 376,083
|
This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
list : a list of cliques
|
def expand(self, tags, clique_scoring_func=None):
    """Expand tags into cliques of mutually compatible entities.

    Args:
        tags (list): tags to group into cliques.
        clique_scoring_func (func, optional): returns a float score for a
            clique; cliques are sorted best-first when provided.

    Returns:
        generator: lattice traversal over all clique combinations.
    """
    lattice = Lattice()
    overlapping_spans = []

    # end of the furthest-reaching tag currently in the overlap group
    def end_token_index():
        return max([t.get('end_token') for t in overlapping_spans])
    for i in xrange(len(tags)):
        tag = tags[i]
        # group tags whose token spans overlap, then resolve each group
        if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
            overlapping_spans.append(tag)
        elif len(overlapping_spans) > 1:
            cliques = list(self._sub_expand(overlapping_spans))
            if clique_scoring_func:
                cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
            lattice.append(cliques)
            overlapping_spans = [tag]
        else:
            lattice.append(overlapping_spans)
            overlapping_spans = [tag]
    # flush the final overlap group
    if len(overlapping_spans) > 1:
        cliques = list(self._sub_expand(overlapping_spans))
        if clique_scoring_func:
            cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
        lattice.append(cliques)
    else:
        lattice.append(overlapping_spans)
    return lattice.traverse()
| 376,084
|
Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
|
def _iterate_subsequences(self, tokens):
    """Yield every contiguous token subsequence with its start index.

    Note: quadratic in the number of tokens, so regex entities that
    trigger this are expensive.

    Args:
        tokens (list): tokens to combine.

    Yields:
        tuple: (space-joined subsequence, start index).
    """
    n = len(tokens)
    for start in xrange(n):
        for stop in xrange(start + 1, n + 1):
            yield ' '.join(tokens[start:stop]), start
| 376,086
|
Initialize the IntentDeterminationEngine
Args:
tokenizer(tokenizer) : tokenizer used to break up spoken text
example EnglishTokenizer()
trie(Trie): tree of matches to Entites
|
def __init__(self, tokenizer=None, trie=None):
    """Initialize the IntentDeterminationEngine.

    Args:
        tokenizer (tokenizer, optional): tokenizer used to break up spoken
            text; defaults to an EnglishTokenizer().
        trie (Trie, optional): trie of entity matches; defaults to a new
            empty Trie().
    """
    pyee.EventEmitter.__init__(self)
    self.tokenizer = tokenizer or EnglishTokenizer()
    self.trie = trie or Trie()
    self.regular_expressions_entities = []
    self._regex_strings = set()
    # the tagger shares the same trie/tokenizer/regex-entity list
    self.tagger = EntityTagger(self.trie, self.tokenizer, self.regular_expressions_entities)
    self.intent_parsers = []
| 376,089
|
Decide the best intent
Args:
parse_result(list): results used to match the best intent.
context(list): ?
Returns:
best_intent, best_tags:
best_intent : The best intent for given results
best_tags : The Tags for result
|
def __best_intent(self, parse_result, context=None):
    """Select the highest-confidence intent for a parse result.

    Args:
        parse_result (dict): parse output with 'tags' and 'confidence'.
        context (list, optional): extra context entities to consider.

    Returns:
        tuple: (best_intent, best_tags) — the winning intent dict and
        the tags that produced it; both None if no parser matched.
    """
    # bug fix: avoid a mutable default argument (`context=[]`)
    if context is None:
        context = []
    best_intent = None
    best_tags = None
    context_as_entities = [{'entities': [c]} for c in context]
    for intent in self.intent_parsers:
        i, tags = intent.validate_with_tags(parse_result.get('tags') + context_as_entities, parse_result.get('confidence'))
        if not best_intent or (i and i.get('confidence') > best_intent.get('confidence')):
            best_intent = i
            best_tags = tags
    return best_intent, best_tags
| 376,090
|
Used to get unused context from context. Any keys not in
parse_result
Args:
parse_results(list): parsed results used to identify what keys
in the context are used.
context(list): this is the context used to match with parsed results
keys missing in the parsed results are the unused context
Returns:
list: A list of the unused context results.
|
def __get_unused_context(self, parse_result, context):
    """Return context entries whose keys the parse result did not use.

    Args:
        parse_result (list): parsed result with a 'tags' list; tags marked
            from_context identify the context keys that were consumed.
        context (list): candidate context entries.

    Returns:
        list: the unused context entries.
    """
    used_keys = {t['key'] for t in parse_result['tags'] if t['from_context']}
    return [entry for entry in context if entry['key'] not in used_keys]
| 376,091
|
Given an utterance, provide a valid intent.
Args:
utterance(str): an ascii or unicode string representing natural language speech
include_tags(list): includes the parsed tags (including position and confidence)
as part of result
context_manager(list): a context manager to provide context to the utterance
num_results(int): a maximum number of results to be returned.
Returns: A generator that yields dictionaries.
|
def determine_intent(self, utterance, num_results=1, include_tags=False, context_manager=None):
    """Given an utterance, provide a valid intent.

    Args:
        utterance (str): an ascii or unicode string representing natural
            language speech.
        num_results (int): maximum number of parse results to consider.
        include_tags (bool): include the parsed tags (position and
            confidence) in the yielded intents as '__tags__'.
        context_manager (list): a context manager providing context to
            the utterance.

    Returns:
        A generator that yields intent dictionaries.
    """
    parser = Parser(self.tokenizer, self.tagger)
    # forward the parser's tagging events to this engine's listeners
    parser.on('tagged_entities',
              (lambda result:
               self.emit("tagged_entities", result)))
    context = []
    if context_manager:
        context = context_manager.get_context()
    for result in parser.parse(utterance, N=num_results, context=context):
        self.emit("parse_result", result)
        # create a context without entities used in result
        remaining_context = self.__get_unused_context(result, context)
        best_intent, tags = self.__best_intent(result, remaining_context)
        if best_intent and best_intent.get('confidence', 0.0) > 0:
            if include_tags:
                best_intent['__tags__'] = tags
            yield best_intent
| 376,092
|
Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
|
def register_entity(self, entity_value, entity_type, alias_of=None):
    """Register an entity to be tagged in potential parse results.

    Args:
        entity_value (str): the value/proper name of an entity instance
            (Ex: "The Big Bang Theory").
        entity_type (str): the type/tag of an entity instance
            (Ex: "Television Show").
        alias_of (str, optional): canonical value this entry aliases.
    """
    canonical = alias_of if alias_of else entity_value
    self.trie.insert(entity_value.lower(), data=(canonical, entity_type))
    # the type itself is also registered so it can be matched as a concept
    self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))
| 376,093
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.