Schema (one record per function):
  id                int32   (0 to 252k)
  repo              string  (7 to 55 chars)
  path              string  (4 to 127 chars)
  func_name         string  (1 to 88 chars)
  original_string   string  (75 to 19.8k chars)
  language          string  (1 class: "python")
  code              string  (51 to 19.8k chars)
  code_tokens       list
  docstring         string  (3 to 17.3k chars)
  docstring_tokens  list
  sha               string  (40 chars)
  url               string  (87 to 242 chars)
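As a quick orientation before the records below, a minimal sketch of iterating such records, assuming they have been exported as JSON Lines under the hypothetical filename python_records.jsonl (the dump itself does not specify a file layout):

import json

# Hypothetical JSONL export: one schema-shaped record per line.
with open('python_records.jsonl') as handle:
  for line in handle:
    record = json.loads(line)
    # 'code' mirrors 'original_string' with the docstring removed.
    print('%s :: %s (%d code tokens)' % (
        record['repo'], record['func_name'], len(record['code_tokens'])))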
7,600
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Slot._set_value
def _set_value(self, slot_record):
  """Sets the value of this slot based on its corresponding _SlotRecord.

  Does nothing if the slot has not yet been filled.

  Args:
    slot_record: The _SlotRecord containing this Slot's value.
  """
  if slot_record.status == _SlotRecord.FILLED:
    self.filled = True
    self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
        slot_record)
    self._fill_datetime = slot_record.fill_time
    self._value = slot_record.value
python
def _set_value(self, slot_record):
  if slot_record.status == _SlotRecord.FILLED:
    self.filled = True
    self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
        slot_record)
    self._fill_datetime = slot_record.fill_time
    self._value = slot_record.value
[ "def", "_set_value", "(", "self", ",", "slot_record", ")", ":", "if", "slot_record", ".", "status", "==", "_SlotRecord", ".", "FILLED", ":", "self", ".", "filled", "=", "True", "self", ".", "_filler_pipeline_key", "=", "_SlotRecord", ".", "filler", ".", "g...
Sets the value of this slot based on its corresponding _SlotRecord.

Does nothing if the slot has not yet been filled.

Args:
  slot_record: The _SlotRecord containing this Slot's value.
[ "Sets", "the", "value", "of", "this", "slot", "based", "on", "its", "corresponding", "_SlotRecord", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L247-L260
7,601
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
PipelineFuture._inherit_outputs
def _inherit_outputs(self,
                     pipeline_name,
                     already_defined,
                     resolve_outputs=False):
  """Inherits outputs from a calling Pipeline.

  Args:
    pipeline_name: The Pipeline class name (used for debugging).
    already_defined: Maps output name to stringified db.Key (of _SlotRecords)
      of any existing output slots to be inherited by this future.
    resolve_outputs: When True, this method will dereference all output slots
      before returning back to the caller, making those output slots' values
      available.

  Raises:
    UnexpectedPipelineError when resolve_outputs is True and any of the
    output slots could not be retrieved from the Datastore.
  """
  for name, slot_key in already_defined.iteritems():
    if not isinstance(slot_key, db.Key):
      slot_key = db.Key(slot_key)

    slot = self._output_dict.get(name)
    if slot is None:
      if self._strict:
        raise UnexpectedPipelineError(
            'Inherited output named "%s" must be filled but '
            'not declared for pipeline class "%s"' % (name, pipeline_name))
      else:
        self._output_dict[name] = Slot(name=name, slot_key=slot_key)
    else:
      slot.key = slot_key
      slot._exists = True

  if resolve_outputs:
    slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
    all_slots = db.get(slot_key_dict.keys())
    for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
      if slot_record is None:
        raise UnexpectedPipelineError(
            'Inherited output named "%s" for pipeline class "%s" is '
            'missing its Slot in the datastore: "%s"'
            % (slot.name, pipeline_name, slot.key))
      slot = slot_key_dict[slot_record.key()]
      slot._set_value(slot_record)
python
def _inherit_outputs(self,
                     pipeline_name,
                     already_defined,
                     resolve_outputs=False):
  for name, slot_key in already_defined.iteritems():
    if not isinstance(slot_key, db.Key):
      slot_key = db.Key(slot_key)

    slot = self._output_dict.get(name)
    if slot is None:
      if self._strict:
        raise UnexpectedPipelineError(
            'Inherited output named "%s" must be filled but '
            'not declared for pipeline class "%s"' % (name, pipeline_name))
      else:
        self._output_dict[name] = Slot(name=name, slot_key=slot_key)
    else:
      slot.key = slot_key
      slot._exists = True

  if resolve_outputs:
    slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
    all_slots = db.get(slot_key_dict.keys())
    for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
      if slot_record is None:
        raise UnexpectedPipelineError(
            'Inherited output named "%s" for pipeline class "%s" is '
            'missing its Slot in the datastore: "%s"'
            % (slot.name, pipeline_name, slot.key))
      slot = slot_key_dict[slot_record.key()]
      slot._set_value(slot_record)
[ "def", "_inherit_outputs", "(", "self", ",", "pipeline_name", ",", "already_defined", ",", "resolve_outputs", "=", "False", ")", ":", "for", "name", ",", "slot_key", "in", "already_defined", ".", "iteritems", "(", ")", ":", "if", "not", "isinstance", "(", "s...
Inherits outputs from a calling Pipeline.

Args:
  pipeline_name: The Pipeline class name (used for debugging).
  already_defined: Maps output name to stringified db.Key (of _SlotRecords)
    of any existing output slots to be inherited by this future.
  resolve_outputs: When True, this method will dereference all output slots
    before returning back to the caller, making those output slots' values
    available.

Raises:
  UnexpectedPipelineError when resolve_outputs is True and any of the
  output slots could not be retrieved from the Datastore.
[ "Inherits", "outputs", "from", "a", "calling", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L314-L358
7,602
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.from_id
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None):
  """Returns an instance corresponding to an existing Pipeline.

  The returned object will have the same properties a Pipeline does while
  it's running synchronously (e.g., like when it's first allocated),
  allowing callers to inspect caller arguments, outputs, fill slots,
  complete the pipeline, abort, retry, etc.

  Args:
    pipeline_id: The ID of this pipeline (a string).
    resolve_outputs: When True, dereference the outputs of this Pipeline
      so their values can be accessed by the caller.
    _pipeline_record: Internal-only. The _PipelineRecord instance to use
      to instantiate this instance instead of fetching it from
      the datastore.

  Returns:
    A Pipeline sub-class instance, or None if it could not be found.
  """
  pipeline_record = _pipeline_record

  # Support pipeline IDs and idempotence_keys that are not unicode.
  if not isinstance(pipeline_id, unicode):
    try:
      pipeline_id = pipeline_id.encode('utf-8')
    except UnicodeDecodeError:
      pipeline_id = hashlib.sha1(pipeline_id).hexdigest()

  pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)

  if pipeline_record is None:
    pipeline_record = db.get(pipeline_key)
  if pipeline_record is None:
    return None

  try:
    pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
  except ImportError, e:
    logging.warning('Tried to find Pipeline %s#%s, but class could '
                    'not be found. Using default Pipeline class instead.',
                    pipeline_record.class_path, pipeline_id)
    pipeline_func_class = cls

  params = pipeline_record.params
  arg_list, kwarg_dict = _dereference_args(
      pipeline_record.class_path, params['args'], params['kwargs'])
  outputs = PipelineFuture(pipeline_func_class.output_names)
  outputs._inherit_outputs(
      pipeline_record.class_path,
      params['output_slots'],
      resolve_outputs=resolve_outputs)

  stage = pipeline_func_class(*arg_list, **kwarg_dict)
  stage.backoff_seconds = params['backoff_seconds']
  stage.backoff_factor = params['backoff_factor']
  stage.max_attempts = params['max_attempts']
  stage.task_retry = params['task_retry']
  stage.target = params.get('target')  # May not be defined for old Pipelines
  stage._current_attempt = pipeline_record.current_attempt
  stage._set_values_internal(
      _PipelineContext('', params['queue_name'], params['base_path']),
      pipeline_key,
      _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record),
      outputs,
      pipeline_record.status)
  return stage
python
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None):
  pipeline_record = _pipeline_record

  # Support pipeline IDs and idempotence_keys that are not unicode.
  if not isinstance(pipeline_id, unicode):
    try:
      pipeline_id = pipeline_id.encode('utf-8')
    except UnicodeDecodeError:
      pipeline_id = hashlib.sha1(pipeline_id).hexdigest()

  pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)

  if pipeline_record is None:
    pipeline_record = db.get(pipeline_key)
  if pipeline_record is None:
    return None

  try:
    pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
  except ImportError, e:
    logging.warning('Tried to find Pipeline %s#%s, but class could '
                    'not be found. Using default Pipeline class instead.',
                    pipeline_record.class_path, pipeline_id)
    pipeline_func_class = cls

  params = pipeline_record.params
  arg_list, kwarg_dict = _dereference_args(
      pipeline_record.class_path, params['args'], params['kwargs'])
  outputs = PipelineFuture(pipeline_func_class.output_names)
  outputs._inherit_outputs(
      pipeline_record.class_path,
      params['output_slots'],
      resolve_outputs=resolve_outputs)

  stage = pipeline_func_class(*arg_list, **kwarg_dict)
  stage.backoff_seconds = params['backoff_seconds']
  stage.backoff_factor = params['backoff_factor']
  stage.max_attempts = params['max_attempts']
  stage.task_retry = params['task_retry']
  stage.target = params.get('target')  # May not be defined for old Pipelines
  stage._current_attempt = pipeline_record.current_attempt
  stage._set_values_internal(
      _PipelineContext('', params['queue_name'], params['base_path']),
      pipeline_key,
      _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record),
      outputs,
      pipeline_record.status)
  return stage
[ "def", "from_id", "(", "cls", ",", "pipeline_id", ",", "resolve_outputs", "=", "True", ",", "_pipeline_record", "=", "None", ")", ":", "pipeline_record", "=", "_pipeline_record", "# Support pipeline IDs and idempotence_keys that are not unicode.", "if", "not", "isinstance...
Returns an instance corresponding to an existing Pipeline.

The returned object will have the same properties a Pipeline does while
it's running synchronously (e.g., like when it's first allocated), allowing
callers to inspect caller arguments, outputs, fill slots, complete the
pipeline, abort, retry, etc.

Args:
  pipeline_id: The ID of this pipeline (a string).
  resolve_outputs: When True, dereference the outputs of this Pipeline
    so their values can be accessed by the caller.
  _pipeline_record: Internal-only. The _PipelineRecord instance to use
    to instantiate this instance instead of fetching it from the datastore.

Returns:
  A Pipeline sub-class instance, or None if it could not be found.
[ "Returns", "an", "instance", "corresponding", "to", "an", "existing", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L544-L609
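The from_id() record above is the usual entry point for polling a pipeline from outside. A minimal sketch, assuming a pipeline_id previously returned by start(), that the package is importable as pipeline per this repo's python/src layout, and that the has_finalized property and outputs.default slot behave as in the surrounding Pipeline class:

import logging
import pipeline

def report_status(pipeline_id):
  # Rehydrate the Pipeline; from_id() returns None when no record exists.
  stage = pipeline.Pipeline.from_id(pipeline_id, resolve_outputs=True)
  if stage is None:
    logging.warning('No pipeline found for ID %s', pipeline_id)
  elif stage.has_finalized:
    # resolve_outputs=True dereferenced the slots, so .value is available.
    logging.info('Default output: %r', stage.outputs.default.value)
  else:
    logging.info('Pipeline %s still running', pipeline_id)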
7,603
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.start
def start(self,
          idempotence_key='',
          queue_name='default',
          base_path='/_ah/pipeline',
          return_task=False,
          countdown=None,
          eta=None):
  """Starts a new instance of this pipeline.

  Args:
    idempotence_key: The ID to use for this Pipeline and throughout its
      asynchronous workflow to ensure the operations are idempotent. If
      empty a starting key will be automatically assigned.
    queue_name: What queue this Pipeline's workflow should execute on.
    base_path: The relative URL path to where the Pipeline API is
      mounted for access by the taskqueue API or external requests.
    return_task: When True, a task to start this pipeline will be returned
      instead of submitted, allowing the caller to start off this pipeline
      as part of a separate transaction (potentially leaving this newly
      allocated pipeline's datastore entities in place if that separate
      transaction fails for any reason).
    countdown: Time in seconds into the future that this Task should execute.
      Defaults to zero.
    eta: A datetime.datetime specifying the absolute time at which the task
      should be executed. Must not be specified if 'countdown' is specified.
      This may be timezone-aware or timezone-naive. If None, defaults to
      now. For pull tasks, no worker will be able to lease this task before
      the time indicated by eta.

  Returns:
    A taskqueue.Task instance if return_task was True. This task will *not*
    have a name, thus to ensure reliable execution of your pipeline you
    should add() this task as part of a separate Datastore transaction.

  Raises:
    PipelineExistsError if the pipeline with the given idempotence key
    exists.
    PipelineSetupError if the pipeline could not start for any other reason.
  """
  if not idempotence_key:
    idempotence_key = uuid.uuid4().hex
  elif not isinstance(idempotence_key, unicode):
    try:
      idempotence_key.encode('utf-8')
    except UnicodeDecodeError:
      idempotence_key = hashlib.sha1(idempotence_key).hexdigest()

  pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
  context = _PipelineContext('', queue_name, base_path)
  future = PipelineFuture(self.output_names, force_strict=True)
  try:
    self._set_values_internal(
        context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
    return context.start(
        self, return_task=return_task, countdown=countdown, eta=eta)
  except Error:
    # Pass through exceptions that originate in this module.
    raise
  except Exception, e:
    # Re-type any exceptions that were raised in dependent methods.
    raise PipelineSetupError('Error starting %s#%s: %s' % (
        self, idempotence_key, str(e)))
python
def start(self,
          idempotence_key='',
          queue_name='default',
          base_path='/_ah/pipeline',
          return_task=False,
          countdown=None,
          eta=None):
  if not idempotence_key:
    idempotence_key = uuid.uuid4().hex
  elif not isinstance(idempotence_key, unicode):
    try:
      idempotence_key.encode('utf-8')
    except UnicodeDecodeError:
      idempotence_key = hashlib.sha1(idempotence_key).hexdigest()

  pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
  context = _PipelineContext('', queue_name, base_path)
  future = PipelineFuture(self.output_names, force_strict=True)
  try:
    self._set_values_internal(
        context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
    return context.start(
        self, return_task=return_task, countdown=countdown, eta=eta)
  except Error:
    # Pass through exceptions that originate in this module.
    raise
  except Exception, e:
    # Re-type any exceptions that were raised in dependent methods.
    raise PipelineSetupError('Error starting %s#%s: %s' % (
        self, idempotence_key, str(e)))
[ "def", "start", "(", "self", ",", "idempotence_key", "=", "''", ",", "queue_name", "=", "'default'", ",", "base_path", "=", "'/_ah/pipeline'", ",", "return_task", "=", "False", ",", "countdown", "=", "None", ",", "eta", "=", "None", ")", ":", "if", "not"...
Starts a new instance of this pipeline.

Args:
  idempotence_key: The ID to use for this Pipeline and throughout its
    asynchronous workflow to ensure the operations are idempotent. If
    empty a starting key will be automatically assigned.
  queue_name: What queue this Pipeline's workflow should execute on.
  base_path: The relative URL path to where the Pipeline API is
    mounted for access by the taskqueue API or external requests.
  return_task: When True, a task to start this pipeline will be returned
    instead of submitted, allowing the caller to start off this pipeline
    as part of a separate transaction (potentially leaving this newly
    allocated pipeline's datastore entities in place if that separate
    transaction fails for any reason).
  countdown: Time in seconds into the future that this Task should execute.
    Defaults to zero.
  eta: A datetime.datetime specifying the absolute time at which the task
    should be executed. Must not be specified if 'countdown' is specified.
    This may be timezone-aware or timezone-naive. If None, defaults to now.
    For pull tasks, no worker will be able to lease this task before the
    time indicated by eta.

Returns:
  A taskqueue.Task instance if return_task was True. This task will *not*
  have a name, thus to ensure reliable execution of your pipeline you
  should add() this task as part of a separate Datastore transaction.

Raises:
  PipelineExistsError if the pipeline with the given idempotence key exists.
  PipelineSetupError if the pipeline could not start for any other reason.
[ "Starts", "a", "new", "instance", "of", "this", "pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L613-L673
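A hedged usage sketch for start(): EchoPipeline is an invented example class, and the code assumes an App Engine Python 2.7 runtime where this package is importable as pipeline:

import pipeline

class EchoPipeline(pipeline.Pipeline):
  """Invented example: a synchronous pipeline returning its argument."""

  def run(self, value):
    return value  # Becomes the 'default' output slot.

def kick_off():
  stage = EchoPipeline('hello')
  # Reusing an idempotence_key raises PipelineExistsError (see Raises above).
  stage.start(queue_name='default', idempotence_key='echo-hello-v1')
  return stage.pipeline_id  # Store this for later from_id() lookups.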
7,604
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.retry
def retry(self, retry_message=''):
  """Forces a currently running asynchronous pipeline to retry.

  Note this may not be called by synchronous or generator pipelines. Those
  must instead raise the 'Retry' exception during execution.

  Args:
    retry_message: Optional message explaining why the retry happened.

  Returns:
    True if the Pipeline should be retried, False if it cannot be cancelled
    mid-flight for some reason.
  """
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call retry() method for asynchronous pipelines.')
  if self.try_cancel():
    self._context.transition_retry(self._pipeline_key, retry_message)
    return True
  else:
    return False
python
def retry(self, retry_message=''):
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call retry() method for asynchronous pipelines.')
  if self.try_cancel():
    self._context.transition_retry(self._pipeline_key, retry_message)
    return True
  else:
    return False
[ "def", "retry", "(", "self", ",", "retry_message", "=", "''", ")", ":", "if", "not", "self", ".", "async", ":", "raise", "UnexpectedPipelineError", "(", "'May only call retry() method for asynchronous pipelines.'", ")", "if", "self", ".", "try_cancel", "(", ")", ...
Forces a currently running asynchronous pipeline to retry.

Note this may not be called by synchronous or generator pipelines. Those
must instead raise the 'Retry' exception during execution.

Args:
  retry_message: Optional message explaining why the retry happened.

Returns:
  True if the Pipeline should be retried, False if it cannot be cancelled
  mid-flight for some reason.
[ "Forces", "a", "currently", "running", "asynchronous", "pipeline", "to", "retry", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L693-L713
7,605
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.abort
def abort(self, abort_message=''):
  """Mark the entire pipeline up to the root as aborted.

  Note this should only be called from *outside* the context of a running
  pipeline. Synchronous and generator pipelines should raise the 'Abort'
  exception to cause this behavior during execution.

  Args:
    abort_message: Optional message explaining why the abort happened.

  Returns:
    True if the abort signal was sent successfully; False if the pipeline
    could not be aborted for any reason.
  """
  # TODO: Use thread-local variable to enforce that this is not called
  # while a pipeline is executing in the current thread.
  if (self.async and self._root_pipeline_key == self._pipeline_key and
      not self.try_cancel()):
    # Handle the special case where the root pipeline is async and thus
    # cannot be aborted outright.
    return False
  else:
    return self._context.begin_abort(
        self._root_pipeline_key, abort_message=abort_message)
python
def abort(self, abort_message=''):
  # TODO: Use thread-local variable to enforce that this is not called
  # while a pipeline is executing in the current thread.
  if (self.async and self._root_pipeline_key == self._pipeline_key and
      not self.try_cancel()):
    # Handle the special case where the root pipeline is async and thus
    # cannot be aborted outright.
    return False
  else:
    return self._context.begin_abort(
        self._root_pipeline_key, abort_message=abort_message)
[ "def", "abort", "(", "self", ",", "abort_message", "=", "''", ")", ":", "# TODO: Use thread-local variable to enforce that this is not called", "# while a pipeline is executing in the current thread.", "if", "(", "self", ".", "async", "and", "self", ".", "_root_pipeline_key",...
Mark the entire pipeline up to the root as aborted.

Note this should only be called from *outside* the context of a running
pipeline. Synchronous and generator pipelines should raise the 'Abort'
exception to cause this behavior during execution.

Args:
  abort_message: Optional message explaining why the abort happened.

Returns:
  True if the abort signal was sent successfully; False if the pipeline
  could not be aborted for any reason.
[ "Mark", "the", "entire", "pipeline", "up", "to", "the", "root", "as", "aborted", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L715-L738
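Combining the from_id() and abort() records, a sketch of an out-of-band abort; the pipeline_id is assumed to have been stored when the pipeline was started:

import logging
import pipeline

def abort_pipeline(pipeline_id):
  stage = pipeline.Pipeline.from_id(pipeline_id)
  if stage is not None and not stage.abort('operator requested shutdown'):
    # Per the docstring, an async root pipeline that cannot be cancelled
    # mid-flight reports failure here.
    logging.warning('Abort signal for %s was not sent', pipeline_id)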
7,606
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.fill
def fill(self, name_or_slot, value):
  """Fills an output slot required by this Pipeline.

  Args:
    name_or_slot: The name of the slot (a string) or Slot record to fill.
    value: The serializable value to assign to this slot.

  Raises:
    UnexpectedPipelineError if the Slot no longer exists.
    SlotNotDeclaredError if trying to output to a slot that was not
    declared ahead of time.
  """
  if isinstance(name_or_slot, basestring):
    slot = getattr(self.outputs, name_or_slot)
  elif isinstance(name_or_slot, Slot):
    slot = name_or_slot
  else:
    raise UnexpectedPipelineError(
        'Could not fill invalid output name: %r' % name_or_slot)

  if not slot._exists:
    raise SlotNotDeclaredError(
        'Cannot fill output with name "%s" that was just '
        'declared within the Pipeline context.' % slot.name)

  self._context.fill_slot(self._pipeline_key, slot, value)
python
def fill(self, name_or_slot, value):
  if isinstance(name_or_slot, basestring):
    slot = getattr(self.outputs, name_or_slot)
  elif isinstance(name_or_slot, Slot):
    slot = name_or_slot
  else:
    raise UnexpectedPipelineError(
        'Could not fill invalid output name: %r' % name_or_slot)

  if not slot._exists:
    raise SlotNotDeclaredError(
        'Cannot fill output with name "%s" that was just '
        'declared within the Pipeline context.' % slot.name)

  self._context.fill_slot(self._pipeline_key, slot, value)
[ "def", "fill", "(", "self", ",", "name_or_slot", ",", "value", ")", ":", "if", "isinstance", "(", "name_or_slot", ",", "basestring", ")", ":", "slot", "=", "getattr", "(", "self", ".", "outputs", ",", "name_or_slot", ")", "elif", "isinstance", "(", "name...
Fills an output slot required by this Pipeline.

Args:
  name_or_slot: The name of the slot (a string) or Slot record to fill.
  value: The serializable value to assign to this slot.

Raises:
  UnexpectedPipelineError if the Slot no longer exists.
  SlotNotDeclaredError if trying to output to a slot that was not declared
  ahead of time.
[ "Fills", "an", "output", "slot", "required", "by", "this", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L741-L765
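A sketch tying fill() to the async machinery from the neighboring records; the URL-fetch framing and field names are invented, but the pattern (declare output_names up front, fill from the callback, then complete) follows the docstrings above, since filling an undeclared slot raises SlotNotDeclaredError:

import pipeline

class FetchPipeline(pipeline.Pipeline):
  """Invented async example; outputs must be declared to be fillable."""
  async = True
  output_names = ['status_code']

  def run(self, url):
    # Hand off to an external worker; the callback resumes this pipeline.
    task = self.get_callback_task(params=dict(url=url))
    task.add(queue_name=self.queue_name)

  def callback(self, url=None, status_code='200'):
    self.fill('status_code', int(status_code))
    self.complete(default_output=url)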
7,607
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.set_status
def set_status(self, message=None, console_url=None, status_links=None):
  """Sets the current status of this pipeline.

  This method is purposefully non-transactional. Updates are written to the
  datastore immediately and overwrite all existing statuses.

  Args:
    message: (optional) Overall status message.
    console_url: (optional) Relative URL to use for the "console" of this
      pipeline that displays current progress. When None, no console will
      be displayed.
    status_links: (optional) Dictionary of readable link names to relative
      URLs that should be associated with this pipeline as it runs. These
      links provide convenient access to other dashboards, consoles, etc.
      associated with the pipeline.

  Raises:
    PipelineRuntimeError if the status could not be set for any reason.
  """
  if _TEST_MODE:
    logging.info(
        'New status for %s#%s: message=%r, console_url=%r, status_links=%r',
        self, self.pipeline_id, message, console_url, status_links)
    return

  status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id)
  root_pipeline_key = db.Key.from_path(
      _PipelineRecord.kind(), self.root_pipeline_id)
  status_record = _StatusRecord(
      key=status_key, root_pipeline=root_pipeline_key)

  try:
    if message:
      status_record.message = message
    if console_url:
      status_record.console_url = console_url
    if status_links:
      # Alphabetize the list.
      status_record.link_names = sorted(
          db.Text(s) for s in status_links.iterkeys())
      status_record.link_urls = [
          db.Text(status_links[name]) for name in status_record.link_names]

    status_record.status_time = datetime.datetime.utcnow()

    status_record.put()
  except Exception, e:
    raise PipelineRuntimeError('Could not set status for %s#%s: %s' %
                               (self, self.pipeline_id, str(e)))
python
def set_status(self, message=None, console_url=None, status_links=None):
  if _TEST_MODE:
    logging.info(
        'New status for %s#%s: message=%r, console_url=%r, status_links=%r',
        self, self.pipeline_id, message, console_url, status_links)
    return

  status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id)
  root_pipeline_key = db.Key.from_path(
      _PipelineRecord.kind(), self.root_pipeline_id)
  status_record = _StatusRecord(
      key=status_key, root_pipeline=root_pipeline_key)

  try:
    if message:
      status_record.message = message
    if console_url:
      status_record.console_url = console_url
    if status_links:
      # Alphabetize the list.
      status_record.link_names = sorted(
          db.Text(s) for s in status_links.iterkeys())
      status_record.link_urls = [
          db.Text(status_links[name]) for name in status_record.link_names]

    status_record.status_time = datetime.datetime.utcnow()

    status_record.put()
  except Exception, e:
    raise PipelineRuntimeError('Could not set status for %s#%s: %s' %
                               (self, self.pipeline_id, str(e)))
[ "def", "set_status", "(", "self", ",", "message", "=", "None", ",", "console_url", "=", "None", ",", "status_links", "=", "None", ")", ":", "if", "_TEST_MODE", ":", "logging", ".", "info", "(", "'New status for %s#%s: message=%r, console_url=%r, status_links=%r'", ...
Sets the current status of this pipeline.

This method is purposefully non-transactional. Updates are written to the
datastore immediately and overwrite all existing statuses.

Args:
  message: (optional) Overall status message.
  console_url: (optional) Relative URL to use for the "console" of this
    pipeline that displays current progress. When None, no console will
    be displayed.
  status_links: (optional) Dictionary of readable link names to relative
    URLs that should be associated with this pipeline as it runs. These
    links provide convenient access to other dashboards, consoles, etc.
    associated with the pipeline.

Raises:
  PipelineRuntimeError if the status could not be set for any reason.
[ "Sets", "the", "current", "status", "of", "this", "pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L767-L815
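A sketch of set_status() used for progress reporting inside run(); _shards_for and _process are hypothetical helpers standing in for application logic:

import pipeline

class ImportPipeline(pipeline.Pipeline):
  def run(self, source):
    shards = _shards_for(source)  # Hypothetical helper.
    for index, shard in enumerate(shards):
      _process(shard)  # Hypothetical helper.
      self.set_status(
          message='Processed shard %d of %d' % (index + 1, len(shards)),
          status_links={'Dashboard': '/dashboards/import'})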
7,608
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.complete
def complete(self, default_output=None):
  """Marks this asynchronous Pipeline as complete.

  Args:
    default_output: What value the 'default' output slot should be assigned.

  Raises:
    UnexpectedPipelineError if the slot no longer exists or this method was
    called for a pipeline that is not async.
  """
  # TODO: Enforce that all outputs expected by this async pipeline were
  # filled before this complete() function was called. May require all
  # async functions to declare their outputs upfront.
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call complete() method for asynchronous pipelines.')
  self._context.fill_slot(
      self._pipeline_key, self.outputs.default, default_output)
python
def complete(self, default_output=None):
  # TODO: Enforce that all outputs expected by this async pipeline were
  # filled before this complete() function was called. May require all
  # async functions to declare their outputs upfront.
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call complete() method for asynchronous pipelines.')
  self._context.fill_slot(
      self._pipeline_key, self.outputs.default, default_output)
[ "def", "complete", "(", "self", ",", "default_output", "=", "None", ")", ":", "# TODO: Enforce that all outputs expected by this async pipeline were", "# filled before this complete() function was called. May required all", "# async functions to declare their outputs upfront.", "if", "no...
Marks this asynchronous Pipeline as complete.

Args:
  default_output: What value the 'default' output slot should be assigned.

Raises:
  UnexpectedPipelineError if the slot no longer exists or this method was
  called for a pipeline that is not async.
[ "Marks", "this", "asynchronous", "Pipeline", "as", "complete", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L817-L834
7,609
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.get_callback_url
def get_callback_url(self, **kwargs):
  """Returns a relative URL for invoking this Pipeline's callback method.

  Args:
    kwargs: Dictionary mapping keyword argument names to single values that
      should be passed to the callback when it is invoked.

  Raises:
    UnexpectedPipelineError if this is invoked on a pipeline that is not
    async.
  """
  # TODO: Support positional parameters.
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call get_callback_url() method for asynchronous pipelines.')
  kwargs['pipeline_id'] = self._pipeline_key.name()
  params = urllib.urlencode(sorted(kwargs.items()))
  return '%s/callback?%s' % (self.base_path, params)
python
def get_callback_url(self, **kwargs):
  # TODO: Support positional parameters.
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call get_callback_url() method for asynchronous pipelines.')
  kwargs['pipeline_id'] = self._pipeline_key.name()
  params = urllib.urlencode(sorted(kwargs.items()))
  return '%s/callback?%s' % (self.base_path, params)
[ "def", "get_callback_url", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# TODO: Support positional parameters.", "if", "not", "self", ".", "async", ":", "raise", "UnexpectedPipelineError", "(", "'May only call get_callback_url() method for asynchronous pipelines.'", ")"...
Returns a relative URL for invoking this Pipeline's callback method.

Args:
  kwargs: Dictionary mapping keyword argument names to single values that
    should be passed to the callback when it is invoked.

Raises:
  UnexpectedPipelineError if this is invoked on a pipeline that is not
  async.
[ "Returns", "a", "relative", "URL", "for", "invoking", "this", "Pipeline", "s", "callback", "method", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L836-L852
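get_callback_url() is typically used for human-in-the-loop steps; a hedged sketch, with mail delivery left abstract (_send_mail is hypothetical):

import pipeline

class ApprovalPipeline(pipeline.Pipeline):
  """Invented async example: waits for a reviewer to click a link."""
  async = True

  def run(self, reviewer_email):
    approve_url = self.get_callback_url(choice='approve')
    reject_url = self.get_callback_url(choice='reject')
    _send_mail(reviewer_email, approve_url, reject_url)  # Hypothetical.

  def callback(self, choice=None):
    self.complete(default_output=(choice == 'approve'))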
7,610
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.get_callback_task
def get_callback_task(self, *args, **kwargs):
  """Returns a task for calling back this Pipeline.

  Args:
    params: Keyword argument containing a dictionary of key/value pairs
      that will be passed to the callback when it is executed.
    args, kwargs: Passed to the taskqueue.Task constructor. Use these
      arguments to set the task name (for idempotence), etc.

  Returns:
    A taskqueue.Task instance that must be enqueued by the caller.
  """
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call get_callback_task() method for asynchronous '
        'pipelines.')

  params = kwargs.get('params', {})
  kwargs['params'] = params
  params['pipeline_id'] = self._pipeline_key.name()
  kwargs['url'] = self.base_path + '/callback'
  kwargs['method'] = 'POST'
  return taskqueue.Task(*args, **kwargs)
python
def get_callback_task(self, *args, **kwargs):
  if not self.async:
    raise UnexpectedPipelineError(
        'May only call get_callback_task() method for asynchronous '
        'pipelines.')

  params = kwargs.get('params', {})
  kwargs['params'] = params
  params['pipeline_id'] = self._pipeline_key.name()
  kwargs['url'] = self.base_path + '/callback'
  kwargs['method'] = 'POST'
  return taskqueue.Task(*args, **kwargs)
[ "def", "get_callback_task", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "async", ":", "raise", "UnexpectedPipelineError", "(", "'May only call get_callback_task() method for asynchronous pipelines.'", ")", "params", "=...
Returns a task for calling back this Pipeline.

Args:
  params: Keyword argument containing a dictionary of key/value pairs
    that will be passed to the callback when it is executed.
  args, kwargs: Passed to the taskqueue.Task constructor. Use these
    arguments to set the task name (for idempotence), etc.

Returns:
  A taskqueue.Task instance that must be enqueued by the caller.
[ "Returns", "a", "task", "for", "calling", "back", "this", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L854-L875
7,611
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.cleanup
def cleanup(self):
  """Clean up this Pipeline and all Datastore records used for coordination.

  Only works when called on a root pipeline. Child pipelines will ignore
  calls to this method.

  After this method is called, Pipeline.from_id() and related status
  methods will return inconsistent or missing results. This method is
  fire-and-forget and asynchronous.
  """
  if self._root_pipeline_key is None:
    raise UnexpectedPipelineError(
        'Could not cleanup Pipeline with unknown root pipeline ID.')
  if not self.is_root:
    return
  task = taskqueue.Task(
      params=dict(root_pipeline_key=self._root_pipeline_key),
      url=self.base_path + '/cleanup',
      headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
  taskqueue.Queue(self.queue_name).add(task)
python
def cleanup(self):
  if self._root_pipeline_key is None:
    raise UnexpectedPipelineError(
        'Could not cleanup Pipeline with unknown root pipeline ID.')
  if not self.is_root:
    return
  task = taskqueue.Task(
      params=dict(root_pipeline_key=self._root_pipeline_key),
      url=self.base_path + '/cleanup',
      headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
  taskqueue.Queue(self.queue_name).add(task)
[ "def", "cleanup", "(", "self", ")", ":", "if", "self", ".", "_root_pipeline_key", "is", "None", ":", "raise", "UnexpectedPipelineError", "(", "'Could not cleanup Pipeline with unknown root pipeline ID.'", ")", "if", "not", "self", ".", "is_root", ":", "return", "tas...
Clean up this Pipeline and all Datastore records used for coordination. Only works when called on a root pipeline. Child pipelines will ignore calls to this method. After this method is called, Pipeline.from_id() and related status methods will return inconsistent or missing results. This method is fire-and-forget and asynchronous.
[ "Clean", "up", "this", "Pipeline", "and", "all", "Datastore", "records", "used", "for", "coordination", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L937-L956
7,612
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.with_params
def with_params(self, **kwargs):
  """Modify various execution parameters of a Pipeline before it runs.

  This method has no effect in test mode.

  Args:
    kwargs: Attributes to modify on this Pipeline instance before it has
      been executed.

  Returns:
    This Pipeline instance, for easy chaining.
  """
  if _TEST_MODE:
    logging.info(
        'Setting runtime parameters for %s#%s: %r',
        self, self.pipeline_id, kwargs)
    return self

  if self.pipeline_id is not None:
    raise UnexpectedPipelineError(
        'May only call with_params() on a Pipeline that has not yet '
        'been scheduled for execution.')

  ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
  for name, value in kwargs.iteritems():
    if name not in ALLOWED:
      raise TypeError('Unexpected keyword: %s=%r' % (name, value))
    setattr(self, name, value)
  return self
python
def with_params(self, **kwargs):
  if _TEST_MODE:
    logging.info(
        'Setting runtime parameters for %s#%s: %r',
        self, self.pipeline_id, kwargs)
    return self

  if self.pipeline_id is not None:
    raise UnexpectedPipelineError(
        'May only call with_params() on a Pipeline that has not yet '
        'been scheduled for execution.')

  ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
  for name, value in kwargs.iteritems():
    if name not in ALLOWED:
      raise TypeError('Unexpected keyword: %s=%r' % (name, value))
    setattr(self, name, value)
  return self
[ "def", "with_params", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "_TEST_MODE", ":", "logging", ".", "info", "(", "'Setting runtime parameters for %s#%s: %r'", ",", "self", ",", "self", ".", "pipeline_id", ",", "kwargs", ")", "return", "self", "if"...
Modify various execution parameters of a Pipeline before it runs.

This method has no effect in test mode.

Args:
  kwargs: Attributes to modify on this Pipeline instance before it has
    been executed.

Returns:
  This Pipeline instance, for easy chaining.
[ "Modify", "various", "execution", "parameters", "of", "a", "Pipeline", "before", "it", "runs", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L958-L986
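with_params() must run before the pipeline is scheduled (while pipeline_id is still None); a sketch of the usual chaining inside a generator pipeline, reusing the invented EchoPipeline from the start() sketch above:

import pipeline

class ParentPipeline(pipeline.Pipeline):
  def run(self, *values):
    for value in values:
      # Tune retry behavior per child before it is scheduled.
      yield EchoPipeline(value).with_params(
          max_attempts=5, backoff_seconds=30, backoff_factor=2)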
7,613
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._set_class_path
def _set_class_path(cls, module_dict=sys.modules):
  """Sets the absolute path to this class as a string.

  Used by the Pipeline API to reconstruct the Pipeline sub-class object
  at execution time instead of passing around a serialized function.

  Args:
    module_dict: Used for testing.
  """
  # Do not traverse the class hierarchy fetching the class path attribute.
  found = cls.__dict__.get('_class_path')
  if found is not None:
    return

  # Do not set the _class_path for the base-class, otherwise all children's
  # lookups for _class_path will fall through and return 'Pipeline' above.
  # This situation can happen if users call the generic Pipeline.from_id
  # to get the result of a Pipeline without knowing its specific class.
  if cls is Pipeline:
    return

  class_path = '%s.%s' % (cls.__module__, cls.__name__)
  # When a WSGI handler is invoked as an entry point, any Pipeline class
  # defined in the same file as the handler will get __module__ set to
  # __main__. Thus we need to find out its real fully qualified path.
  if cls.__module__ == '__main__':
    for name, module in module_dict.items():
      if name == '__main__':
        continue
      found = getattr(module, cls.__name__, None)
      if found is cls:
        class_path = '%s.%s' % (name, cls.__name__)
        break
  cls._class_path = class_path
python
def _set_class_path(cls, module_dict=sys.modules):
  # Do not traverse the class hierarchy fetching the class path attribute.
  found = cls.__dict__.get('_class_path')
  if found is not None:
    return

  # Do not set the _class_path for the base-class, otherwise all children's
  # lookups for _class_path will fall through and return 'Pipeline' above.
  # This situation can happen if users call the generic Pipeline.from_id
  # to get the result of a Pipeline without knowing its specific class.
  if cls is Pipeline:
    return

  class_path = '%s.%s' % (cls.__module__, cls.__name__)
  # When a WSGI handler is invoked as an entry point, any Pipeline class
  # defined in the same file as the handler will get __module__ set to
  # __main__. Thus we need to find out its real fully qualified path.
  if cls.__module__ == '__main__':
    for name, module in module_dict.items():
      if name == '__main__':
        continue
      found = getattr(module, cls.__name__, None)
      if found is cls:
        class_path = '%s.%s' % (name, cls.__name__)
        break
  cls._class_path = class_path
[ "def", "_set_class_path", "(", "cls", ",", "module_dict", "=", "sys", ".", "modules", ")", ":", "# Do not traverse the class hierarchy fetching the class path attribute.", "found", "=", "cls", ".", "__dict__", ".", "get", "(", "'_class_path'", ")", "if", "found", "i...
Sets the absolute path to this class as a string.

Used by the Pipeline API to reconstruct the Pipeline sub-class object
at execution time instead of passing around a serialized function.

Args:
  module_dict: Used for testing.
[ "Sets", "the", "absolute", "path", "to", "this", "class", "as", "a", "string", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1035-L1068
7,614
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._set_values_internal
def _set_values_internal(self,
                         context,
                         pipeline_key,
                         root_pipeline_key,
                         outputs,
                         result_status):
  """Sets the user-visible values provided as an API by this class.

  Args:
    context: The _PipelineContext used for this Pipeline.
    pipeline_key: The db.Key of this pipeline.
    root_pipeline_key: The db.Key of the root pipeline.
    outputs: The PipelineFuture for this pipeline.
    result_status: The result status of this pipeline.
  """
  self._context = context
  self._pipeline_key = pipeline_key
  self._root_pipeline_key = root_pipeline_key
  self._result_status = result_status
  self.outputs = outputs
python
def _set_values_internal(self,
                         context,
                         pipeline_key,
                         root_pipeline_key,
                         outputs,
                         result_status):
  self._context = context
  self._pipeline_key = pipeline_key
  self._root_pipeline_key = root_pipeline_key
  self._result_status = result_status
  self.outputs = outputs
[ "def", "_set_values_internal", "(", "self", ",", "context", ",", "pipeline_key", ",", "root_pipeline_key", ",", "outputs", ",", "result_status", ")", ":", "self", ".", "_context", "=", "context", "self", ".", "_pipeline_key", "=", "pipeline_key", "self", ".", ...
Sets the user-visible values provided as an API by this class.

Args:
  context: The _PipelineContext used for this Pipeline.
  pipeline_key: The db.Key of this pipeline.
  root_pipeline_key: The db.Key of the root pipeline.
  outputs: The PipelineFuture for this pipeline.
  result_status: The result status of this pipeline.
[ "Sets", "the", "user", "-", "visible", "values", "provided", "as", "an", "API", "by", "this", "class", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1070-L1089
7,615
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._callback_internal
def _callback_internal(self, kwargs):
  """Used to execute callbacks on asynchronous pipelines."""
  logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
                self._class_path, _short_repr(self.args),
                _short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
  return self.callback(**kwargs)
python
def _callback_internal(self, kwargs):
  logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
                self._class_path, _short_repr(self.args),
                _short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
  return self.callback(**kwargs)
[ "def", "_callback_internal", "(", "self", ",", "kwargs", ")", ":", "logging", ".", "debug", "(", "'Callback %s(*%s, **%s)#%s with params: %r'", ",", "self", ".", "_class_path", ",", "_short_repr", "(", "self", ".", "args", ")", ",", "_short_repr", "(", "self", ...
Used to execute callbacks on asynchronous pipelines.
[ "Used", "to", "execute", "callbacks", "on", "asynchronous", "pipelines", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1091-L1096
7,616
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._run_internal
def _run_internal(self,
                  context,
                  pipeline_key,
                  root_pipeline_key,
                  caller_output):
  """Used by the Pipeline evaluator to execute this Pipeline."""
  self._set_values_internal(
      context, pipeline_key, root_pipeline_key, caller_output,
      _PipelineRecord.RUN)
  logging.debug('Running %s(*%s, **%s)#%s',
                self._class_path, _short_repr(self.args),
                _short_repr(self.kwargs), self._pipeline_key.name())
  return self.run(*self.args, **self.kwargs)
python
def _run_internal(self,
                  context,
                  pipeline_key,
                  root_pipeline_key,
                  caller_output):
  self._set_values_internal(
      context, pipeline_key, root_pipeline_key, caller_output,
      _PipelineRecord.RUN)
  logging.debug('Running %s(*%s, **%s)#%s',
                self._class_path, _short_repr(self.args),
                _short_repr(self.kwargs), self._pipeline_key.name())
  return self.run(*self.args, **self.kwargs)
[ "def", "_run_internal", "(", "self", ",", "context", ",", "pipeline_key", ",", "root_pipeline_key", ",", "caller_output", ")", ":", "self", ".", "_set_values_internal", "(", "context", ",", "pipeline_key", ",", "root_pipeline_key", ",", "caller_output", ",", "_Pip...
Used by the Pipeline evaluator to execute this Pipeline.
[ "Used", "by", "the", "Pipeline", "evaluator", "to", "execute", "this", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1098-L1110
7,617
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._finalized_internal
def _finalized_internal(self,
                        context,
                        pipeline_key,
                        root_pipeline_key,
                        caller_output,
                        aborted):
  """Used by the Pipeline evaluator to finalize this Pipeline."""
  result_status = _PipelineRecord.RUN
  if aborted:
    result_status = _PipelineRecord.ABORTED

  self._set_values_internal(
      context, pipeline_key, root_pipeline_key, caller_output, result_status)
  logging.debug('Finalizing %s(*%r, **%r)#%s',
                self._class_path, _short_repr(self.args),
                _short_repr(self.kwargs), self._pipeline_key.name())
  try:
    self.finalized()
  except NotImplementedError:
    pass
python
def _finalized_internal(self,
                        context,
                        pipeline_key,
                        root_pipeline_key,
                        caller_output,
                        aborted):
  result_status = _PipelineRecord.RUN
  if aborted:
    result_status = _PipelineRecord.ABORTED

  self._set_values_internal(
      context, pipeline_key, root_pipeline_key, caller_output, result_status)
  logging.debug('Finalizing %s(*%r, **%r)#%s',
                self._class_path, _short_repr(self.args),
                _short_repr(self.kwargs), self._pipeline_key.name())
  try:
    self.finalized()
  except NotImplementedError:
    pass
[ "def", "_finalized_internal", "(", "self", ",", "context", ",", "pipeline_key", ",", "root_pipeline_key", ",", "caller_output", ",", "aborted", ")", ":", "result_status", "=", "_PipelineRecord", ".", "RUN", "if", "aborted", ":", "result_status", "=", "_PipelineRec...
Used by the Pipeline evaluator to finalize this Pipeline.
[ "Used", "by", "the", "Pipeline", "evaluator", "to", "finalize", "this", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1112-L1131
7,618
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
InOrder._add_future
def _add_future(cls, future):
  """Adds a future to the list of in-order futures thus far.

  Args:
    future: The future to add to the list.
  """
  if cls._local._activated:
    cls._local._in_order_futures.add(future)
python
def _add_future(cls, future):
  if cls._local._activated:
    cls._local._in_order_futures.add(future)
[ "def", "_add_future", "(", "cls", ",", "future", ")", ":", "if", "cls", ".", "_local", ".", "_activated", ":", "cls", ".", "_local", ".", "_in_order_futures", ".", "add", "(", "future", ")" ]
Adds a future to the list of in-order futures thus far.

Args:
  future: The future to add to the list.
[ "Adds", "a", "future", "to", "the", "list", "of", "in", "-", "order", "futures", "thus", "far", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1190-L1197
7,619
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
InOrder._thread_init
def _thread_init(cls):
  """Ensure thread local is initialized."""
  if not hasattr(cls._local, '_in_order_futures'):
    cls._local._in_order_futures = set()
    cls._local._activated = False
python
def _thread_init(cls):
  if not hasattr(cls._local, '_in_order_futures'):
    cls._local._in_order_futures = set()
    cls._local._activated = False
[ "def", "_thread_init", "(", "cls", ")", ":", "if", "not", "hasattr", "(", "cls", ".", "_local", ",", "'_in_order_futures'", ")", ":", "cls", ".", "_local", ".", "_in_order_futures", "=", "set", "(", ")", "cls", ".", "_local", ".", "_activated", "=", "F...
Ensure thread local is initialized.
[ "Ensure", "thread", "local", "is", "initialized", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1224-L1228
7,620
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.from_environ
def from_environ(cls, environ=os.environ):
  """Constructs a _PipelineContext from the task queue environment."""
  base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2]
  return cls(
      environ['HTTP_X_APPENGINE_TASKNAME'],
      environ['HTTP_X_APPENGINE_QUEUENAME'],
      base_path)
python
def from_environ(cls, environ=os.environ):
  base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2]
  return cls(
      environ['HTTP_X_APPENGINE_TASKNAME'],
      environ['HTTP_X_APPENGINE_QUEUENAME'],
      base_path)
[ "def", "from_environ", "(", "cls", ",", "environ", "=", "os", ".", "environ", ")", ":", "base_path", ",", "unused", "=", "(", "environ", "[", "'PATH_INFO'", "]", ".", "rsplit", "(", "'/'", ",", "1", ")", "+", "[", "''", "]", ")", "[", ":", "2", ...
Constructs a _PipelineContext from the task queue environment.
[ "Constructs", "a", "_PipelineContext", "from", "the", "task", "queue", "environment", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1452-L1458
7,621
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.fill_slot
def fill_slot(self, filler_pipeline_key, slot, value):
  """Fills a slot, enqueueing a task to trigger pending barriers.

  Args:
    filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
      that filled this slot.
    slot: The Slot instance to fill.
    value: The serializable value to assign.

  Raises:
    UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
    be found in the Datastore.
  """
  if not isinstance(filler_pipeline_key, db.Key):
    filler_pipeline_key = db.Key(filler_pipeline_key)

  if _TEST_MODE:
    slot._set_value_test(filler_pipeline_key, value)
  else:
    encoded_value = json.dumps(value,
                               sort_keys=True,
                               cls=mr_util.JsonEncoder)
    value_text = None
    value_blob = None
    if len(encoded_value) <= _MAX_JSON_SIZE:
      value_text = db.Text(encoded_value)
    else:
      # The encoded value is too big. Save it as a blob.
      value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())

    def txn():
      slot_record = db.get(slot.key)
      if slot_record is None:
        raise UnexpectedPipelineError(
            'Tried to fill missing slot "%s" '
            'by pipeline ID "%s" with value: %r'
            % (slot.key, filler_pipeline_key.name(), value))
      # NOTE: Always take the override value here. If down-stream pipelines
      # need a consistent view of all up-stream outputs (meaning, all of the
      # outputs came from the same retry attempt of the upstream pipeline),
      # the down-stream pipeline must also wait for the 'default' output
      # of these up-stream pipelines.
      slot_record.filler = filler_pipeline_key
      slot_record.value_text = value_text
      slot_record.value_blob = value_blob
      slot_record.status = _SlotRecord.FILLED
      slot_record.fill_time = self._gettime()
      slot_record.put()
      task = taskqueue.Task(
          url=self.barrier_handler_path,
          params=dict(
              slot_key=slot.key,
              use_barrier_indexes=True),
          headers={'X-Ae-Slot-Key': slot.key,
                   'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
      task.add(queue_name=self.queue_name, transactional=True)
    db.run_in_transaction_options(
        db.create_transaction_options(propagation=db.ALLOWED), txn)

  self.session_filled_output_names.add(slot.name)
python
def fill_slot(self, filler_pipeline_key, slot, value):
  if not isinstance(filler_pipeline_key, db.Key):
    filler_pipeline_key = db.Key(filler_pipeline_key)

  if _TEST_MODE:
    slot._set_value_test(filler_pipeline_key, value)
  else:
    encoded_value = json.dumps(value,
                               sort_keys=True,
                               cls=mr_util.JsonEncoder)
    value_text = None
    value_blob = None
    if len(encoded_value) <= _MAX_JSON_SIZE:
      value_text = db.Text(encoded_value)
    else:
      # The encoded value is too big. Save it as a blob.
      value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())

    def txn():
      slot_record = db.get(slot.key)
      if slot_record is None:
        raise UnexpectedPipelineError(
            'Tried to fill missing slot "%s" '
            'by pipeline ID "%s" with value: %r'
            % (slot.key, filler_pipeline_key.name(), value))
      # NOTE: Always take the override value here. If down-stream pipelines
      # need a consistent view of all up-stream outputs (meaning, all of the
      # outputs came from the same retry attempt of the upstream pipeline),
      # the down-stream pipeline must also wait for the 'default' output
      # of these up-stream pipelines.
      slot_record.filler = filler_pipeline_key
      slot_record.value_text = value_text
      slot_record.value_blob = value_blob
      slot_record.status = _SlotRecord.FILLED
      slot_record.fill_time = self._gettime()
      slot_record.put()
      task = taskqueue.Task(
          url=self.barrier_handler_path,
          params=dict(
              slot_key=slot.key,
              use_barrier_indexes=True),
          headers={'X-Ae-Slot-Key': slot.key,
                   'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
      task.add(queue_name=self.queue_name, transactional=True)
    db.run_in_transaction_options(
        db.create_transaction_options(propagation=db.ALLOWED), txn)

  self.session_filled_output_names.add(slot.name)
[ "def", "fill_slot", "(", "self", ",", "filler_pipeline_key", ",", "slot", ",", "value", ")", ":", "if", "not", "isinstance", "(", "filler_pipeline_key", ",", "db", ".", "Key", ")", ":", "filler_pipeline_key", "=", "db", ".", "Key", "(", "filler_pipeline_key"...
Fills a slot, enqueueing a task to trigger pending barriers.

Args:
  filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
    that filled this slot.
  slot: The Slot instance to fill.
  value: The serializable value to assign.

Raises:
  UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
  be found in the Datastore.
[ "Fills", "a", "slot", "enqueueing", "a", "task", "to", "trigger", "pending", "barriers", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1460-L1519
7,622
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.begin_abort
def begin_abort(self, root_pipeline_key, abort_message):
  """Kicks off the abort process for a root pipeline and all its children.

  Args:
    root_pipeline_key: db.Key of the root pipeline to abort.
    abort_message: Message explaining why the abort happened, only saved
      into the root pipeline.

  Returns:
    True if the abort signal was sent successfully; False otherwise.
  """
  def txn():
    pipeline_record = db.get(root_pipeline_key)
    if pipeline_record is None:
      logging.warning(
          'Tried to abort root pipeline ID "%s" but it does not exist.',
          root_pipeline_key.name())
      raise db.Rollback()
    if pipeline_record.status == _PipelineRecord.ABORTED:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; already in state: %s',
          root_pipeline_key.name(), pipeline_record.status)
      raise db.Rollback()
    if pipeline_record.abort_requested:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; abort signal already sent.',
          root_pipeline_key.name())
      raise db.Rollback()

    pipeline_record.abort_requested = True
    pipeline_record.abort_message = abort_message
    pipeline_record.put()

    task = taskqueue.Task(
        url=self.fanout_abort_handler_path,
        params=dict(root_pipeline_key=root_pipeline_key))
    task.add(queue_name=self.queue_name, transactional=True)
    return True

  return db.run_in_transaction(txn)
python
def begin_abort(self, root_pipeline_key, abort_message):
  def txn():
    pipeline_record = db.get(root_pipeline_key)
    if pipeline_record is None:
      logging.warning(
          'Tried to abort root pipeline ID "%s" but it does not exist.',
          root_pipeline_key.name())
      raise db.Rollback()
    if pipeline_record.status == _PipelineRecord.ABORTED:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; already in state: %s',
          root_pipeline_key.name(), pipeline_record.status)
      raise db.Rollback()
    if pipeline_record.abort_requested:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; abort signal already sent.',
          root_pipeline_key.name())
      raise db.Rollback()

    pipeline_record.abort_requested = True
    pipeline_record.abort_message = abort_message
    pipeline_record.put()

    task = taskqueue.Task(
        url=self.fanout_abort_handler_path,
        params=dict(root_pipeline_key=root_pipeline_key))
    task.add(queue_name=self.queue_name, transactional=True)
    return True

  return db.run_in_transaction(txn)
[ "def", "begin_abort", "(", "self", ",", "root_pipeline_key", ",", "abort_message", ")", ":", "def", "txn", "(", ")", ":", "pipeline_record", "=", "db", ".", "get", "(", "root_pipeline_key", ")", "if", "pipeline_record", "is", "None", ":", "logging", ".", "...
Kicks off the abort process for a root pipeline and all its children. Args: root_pipeline_key: db.Key of the root pipeline to abort. abort_message: Message explaining why the abort happened, only saved into the root pipeline. Returns: True if the abort signal was sent successfully; False otherwise.
[ "Kicks", "off", "the", "abort", "process", "for", "a", "root", "pipeline", "and", "all", "its", "children", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1667-L1706
7,623
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.continue_abort
def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): """Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing. """ if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) # NOTE: The results of this query may include _PipelineRecord instances # that are not actually "reachable", meaning you cannot get to them by # starting at the root pipeline and following "fanned_out" onward. This # is acceptable because even these defunct _PipelineRecords will properly # set their status to ABORTED when the signal comes, regardless of any # other status they may have had. # # The only gotcha here is if a Pipeline's finalize method somehow modifies # its inputs (like deleting an input file). In the case there are # unreachable child pipelines, it will appear as if two finalize methods # have been called instead of just one. The saving grace here is that # finalize must be idempotent, so this *should* be harmless. query = ( _PipelineRecord.all(cursor=cursor) .filter('root_pipeline =', root_pipeline_key)) results = query.fetch(max_to_notify) task_list = [] for pipeline_record in results: if pipeline_record.status not in ( _PipelineRecord.RUN, _PipelineRecord.WAITING): continue pipeline_key = pipeline_record.key() task_list.append(taskqueue.Task( name='%s-%s-abort' % (self.task_name, pipeline_key.name()), url=self.abort_handler_path, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT), headers={'X-Ae-Pipeline-Key': pipeline_key})) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-%d' % (prefix, end), url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
python
def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) # NOTE: The results of this query may include _PipelineRecord instances # that are not actually "reachable", meaning you cannot get to them by # starting at the root pipeline and following "fanned_out" onward. This # is acceptable because even these defunct _PipelineRecords will properly # set their status to ABORTED when the signal comes, regardless of any # other status they may have had. # # The only gotcha here is if a Pipeline's finalize method somehow modifies # its inputs (like deleting an input file). In the case there are # unreachable child pipelines, it will appear as if two finalize methods # have been called instead of just one. The saving grace here is that # finalize must be idempotent, so this *should* be harmless. query = ( _PipelineRecord.all(cursor=cursor) .filter('root_pipeline =', root_pipeline_key)) results = query.fetch(max_to_notify) task_list = [] for pipeline_record in results: if pipeline_record.status not in ( _PipelineRecord.RUN, _PipelineRecord.WAITING): continue pipeline_key = pipeline_record.key() task_list.append(taskqueue.Task( name='%s-%s-abort' % (self.task_name, pipeline_key.name()), url=self.abort_handler_path, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT), headers={'X-Ae-Pipeline-Key': pipeline_key})) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-%d' % (prefix, end), url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
[ "def", "continue_abort", "(", "self", ",", "root_pipeline_key", ",", "cursor", "=", "None", ",", "max_to_notify", "=", "_MAX_ABORTS_TO_BEGIN", ")", ":", "if", "not", "isinstance", "(", "root_pipeline_key", ",", "db", ".", "Key", ")", ":", "root_pipeline_key", ...
Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing.
[ "Sends", "the", "abort", "signal", "to", "all", "children", "for", "a", "root", "pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1708-L1771
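The "task continuation with sequence number" trick in continue_abort deserves a note: each continuation task derives its name from the current task's name by appending or incrementing a trailing counter, so a duplicate delivery collides with an already-enqueued name (the swallowed TombstonedTaskError/TaskAlreadyExistsError) instead of forking a second abort chain. A small sketch of the name derivation; the pattern is anchored with $ here for safety, whereas the original is unanchored.

import re

def next_continuation_name(task_name):
    # 'fanout-abort' -> 'fanout-abort-0'; 'fanout-abort-0' -> 'fanout-abort-1'.
    match = re.match(r'(.*)-([0-9]+)$', task_name)
    if match:
        return '%s-%d' % (match.group(1), int(match.group(2)) + 1)
    return '%s-0' % task_name

assert next_continuation_name('fanout-abort') == 'fanout-abort-0'
assert next_continuation_name('fanout-abort-3') == 'fanout-abort-4'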
7,624
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.start
def start(self, pipeline, return_task=True, countdown=None, eta=None): """Starts a pipeline. Args: pipeline: Pipeline instance to run. return_task: When True, do not submit the task to start the pipeline but instead return it for someone else to enqueue. countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: The task to start this pipeline if return_task was True. Raises: PipelineExistsError if the pipeline with the given ID already exists. """ # Adjust all pipeline output keys for this Pipeline to be children of # the _PipelineRecord, that way we can write them all and submit in a # single transaction. for name, slot in pipeline.outputs._output_dict.iteritems(): slot.key = db.Key.from_path( *slot.key.to_path(), **dict(parent=pipeline._pipeline_key)) _, output_slots, params_text, params_blob = _generate_args( pipeline, pipeline.outputs, self.queue_name, self.base_path) @db.transactional(propagation=db.INDEPENDENT) def txn(): pipeline_record = db.get(pipeline._pipeline_key) if pipeline_record is not None: raise PipelineExistsError( 'Pipeline with idempotence key "%s" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))) entities_to_put = [] for name, slot in pipeline.outputs._output_dict.iteritems(): entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=pipeline._pipeline_key)) entities_to_put.append(_PipelineRecord( key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts)) entities_to_put.extend(_PipelineContext._create_barrier_entities( pipeline._pipeline_key, pipeline._pipeline_key, _BarrierRecord.FINALIZE, output_slots)) db.put(entities_to_put) task = taskqueue.Task( url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target, countdown=countdown, eta=eta) if return_task: return task task.add(queue_name=self.queue_name, transactional=True) task = txn() # Immediately mark the output slots as existing so they can be filled # by asynchronous pipelines or used in test mode. for output_slot in pipeline.outputs._output_dict.itervalues(): output_slot._exists = True return task
python
def start(self, pipeline, return_task=True, countdown=None, eta=None): # Adjust all pipeline output keys for this Pipeline to be children of # the _PipelineRecord, that way we can write them all and submit in a # single transaction. for name, slot in pipeline.outputs._output_dict.iteritems(): slot.key = db.Key.from_path( *slot.key.to_path(), **dict(parent=pipeline._pipeline_key)) _, output_slots, params_text, params_blob = _generate_args( pipeline, pipeline.outputs, self.queue_name, self.base_path) @db.transactional(propagation=db.INDEPENDENT) def txn(): pipeline_record = db.get(pipeline._pipeline_key) if pipeline_record is not None: raise PipelineExistsError( 'Pipeline with idempotence key "%s" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))) entities_to_put = [] for name, slot in pipeline.outputs._output_dict.iteritems(): entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=pipeline._pipeline_key)) entities_to_put.append(_PipelineRecord( key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts)) entities_to_put.extend(_PipelineContext._create_barrier_entities( pipeline._pipeline_key, pipeline._pipeline_key, _BarrierRecord.FINALIZE, output_slots)) db.put(entities_to_put) task = taskqueue.Task( url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target, countdown=countdown, eta=eta) if return_task: return task task.add(queue_name=self.queue_name, transactional=True) task = txn() # Immediately mark the output slots as existing so they can be filled # by asynchronous pipelines or used in test mode. for output_slot in pipeline.outputs._output_dict.itervalues(): output_slot._exists = True return task
[ "def", "start", "(", "self", ",", "pipeline", ",", "return_task", "=", "True", ",", "countdown", "=", "None", ",", "eta", "=", "None", ")", ":", "# Adjust all pipeline output keys for this Pipeline to be children of", "# the _PipelineRecord, that way we can write them all a...
Starts a pipeline. Args: pipeline: Pipeline instance to run. return_task: When True, do not submit the task to start the pipeline but instead return it for someone else to enqueue. countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: The task to start this pipeline if return_task was True. Raises: PipelineExistsError if the pipeline with the given ID already exists.
[ "Starts", "a", "pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1773-L1855
7,625
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext._create_barrier_entities
def _create_barrier_entities(root_pipeline_key, child_pipeline_key, purpose, blocking_slot_keys): """Creates all of the entities required for a _BarrierRecord. Args: root_pipeline_key: The root pipeline this is part of. child_pipeline_key: The pipeline this barrier is for. purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE. blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that this barrier should wait on before firing. Returns: List of entities, starting with the _BarrierRecord entity, followed by _BarrierIndexes used for firing when _SlotRecords are filled in the same order as the blocking_slot_keys list provided. All of these entities should be put in the Datastore to ensure the barrier fires properly. """ result = [] blocking_slot_keys = list(blocking_slot_keys) barrier = _BarrierRecord( parent=child_pipeline_key, key_name=purpose, target=child_pipeline_key, root_pipeline=root_pipeline_key, blocking_slots=blocking_slot_keys) result.append(barrier) for slot_key in blocking_slot_keys: barrier_index_path = [] barrier_index_path.extend(slot_key.to_path()) barrier_index_path.extend(child_pipeline_key.to_path()) barrier_index_path.extend([_BarrierIndex.kind(), purpose]) barrier_index_key = db.Key.from_path(*barrier_index_path) barrier_index = _BarrierIndex( key=barrier_index_key, root_pipeline=root_pipeline_key) result.append(barrier_index) return result
python
def _create_barrier_entities(root_pipeline_key, child_pipeline_key, purpose, blocking_slot_keys): result = [] blocking_slot_keys = list(blocking_slot_keys) barrier = _BarrierRecord( parent=child_pipeline_key, key_name=purpose, target=child_pipeline_key, root_pipeline=root_pipeline_key, blocking_slots=blocking_slot_keys) result.append(barrier) for slot_key in blocking_slot_keys: barrier_index_path = [] barrier_index_path.extend(slot_key.to_path()) barrier_index_path.extend(child_pipeline_key.to_path()) barrier_index_path.extend([_BarrierIndex.kind(), purpose]) barrier_index_key = db.Key.from_path(*barrier_index_path) barrier_index = _BarrierIndex( key=barrier_index_key, root_pipeline=root_pipeline_key) result.append(barrier_index) return result
[ "def", "_create_barrier_entities", "(", "root_pipeline_key", ",", "child_pipeline_key", ",", "purpose", ",", "blocking_slot_keys", ")", ":", "result", "=", "[", "]", "blocking_slot_keys", "=", "list", "(", "blocking_slot_keys", ")", "barrier", "=", "_BarrierRecord", ...
Creates all of the entities required for a _BarrierRecord. Args: root_pipeline_key: The root pipeline this is part of. child_pipeline_key: The pipeline this barrier is for. purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE. blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that this barrier should wait on before firing. Returns: List of entities, starting with the _BarrierRecord entity, followed by _BarrierIndexes used for firing when _SlotRecords are filled in the same order as the blocking_slot_keys list provided. All of these entities should be put in the Datastore to ensure the barrier fires properly.
[ "Creates", "all", "of", "the", "entities", "required", "for", "a", "_BarrierRecord", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2362-L2405
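The _BarrierIndex key built above is fully deterministic: the blocking slot's key path, then the child pipeline's key path, then the index kind and purpose. Because the key encodes all of that, a slot fill can later reach every dependent barrier by key construction rather than an indexed query. A plain-tuple sketch of the path concatenation (the entity kinds are real; the helper itself and the 'finalize' purpose string are illustrative):

def barrier_index_path(slot_path, child_pipeline_path, purpose,
                       index_kind='_BarrierIndex'):
    # Concatenate the paths in the same order as _create_barrier_entities.
    return tuple(slot_path) + tuple(child_pipeline_path) + (index_kind, purpose)

path = barrier_index_path(('_SlotRecord', 'slot-1'),
                          ('_PipelineRecord', 'pipe-9'),
                          'finalize')
assert path == ('_SlotRecord', 'slot-1',
                '_PipelineRecord', 'pipe-9',
                '_BarrierIndex', 'finalize')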
7,626
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.handle_run_exception
def handle_run_exception(self, pipeline_key, pipeline_func, e): """Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method. """ if isinstance(e, Retry): retry_message = str(e) logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message) self.transition_retry(pipeline_key, retry_message) elif isinstance(e, Abort): abort_message = str(e) logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message) pipeline_func.abort(abort_message) else: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Generator %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) return pipeline_func.task_retry
python
def handle_run_exception(self, pipeline_key, pipeline_func, e): if isinstance(e, Retry): retry_message = str(e) logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message) self.transition_retry(pipeline_key, retry_message) elif isinstance(e, Abort): abort_message = str(e) logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message) pipeline_func.abort(abort_message) else: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Generator %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) return pipeline_func.task_retry
[ "def", "handle_run_exception", "(", "self", ",", "pipeline_key", ",", "pipeline_func", ",", "e", ")", ":", "if", "isinstance", "(", "e", ",", "Retry", ")", ":", "retry_message", "=", "str", "(", "e", ")", "logging", ".", "warning", "(", "'User forced retry...
Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method.
[ "Handles", "an", "exception", "raised", "by", "a", "Pipeline", "s", "user", "code", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2407-L2435
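handle_run_exception dispatches purely on exception type: Retry and Abort are control-flow signals raised by user code, and anything else is folded into a retry with a synthesized "ClassName: message" string. A self-contained sketch of that dispatch, with Retry and Abort redefined locally for illustration rather than imported from the library:

class Retry(Exception):
    pass  # user code raises this to force another attempt

class Abort(Exception):
    pass  # user code raises this to abort the whole pipeline tree

def classify_run_exception(e):
    if isinstance(e, Retry):
        return 'retry', str(e)
    if isinstance(e, Abort):
        return 'abort', str(e)
    # Any other exception becomes an implicit retry with a descriptive message.
    return 'retry', '%s: %s' % (e.__class__.__name__, e)

assert classify_run_exception(Retry('flaky backend')) == ('retry', 'flaky backend')
assert classify_run_exception(Abort('bad input')) == ('abort', 'bad input')
assert classify_run_exception(ValueError('oops')) == ('retry', 'ValueError: oops')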
7,627
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.transition_run
def transition_run(self, pipeline_key, blocking_slot_keys=None, fanned_out_pipelines=None, pipelines_to_run=None): """Marks an asynchronous or generator pipeline as running. Does nothing if the pipeline is no longer in a runnable state. Args: pipeline_key: The db.Key of the _PipelineRecord to update. blocking_slot_keys: List of db.Key instances that this pipeline's finalization barrier should wait on in addition to the existing one. This is used to update the barrier to include all child outputs. When None, the barrier will not be updated. fanned_out_pipelines: List of db.Key instances of _PipelineRecords that were fanned out by this generator pipeline. This is distinct from the 'pipelines_to_run' list because not all of the pipelines listed here will be immediately ready to execute. When None, then this generator yielded no children. pipelines_to_run: List of db.Key instances of _PipelineRecords that should be kicked off (fan-out) transactionally as part of this transition. When None, no child pipelines will run. All db.Keys in this list must also be present in the fanned_out_pipelines list. Raises: UnexpectedPipelineError if blocking_slot_keys was not empty and the _BarrierRecord has gone missing. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning('Pipeline ID "%s" cannot be marked as run. ' 'Does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status != _PipelineRecord.WAITING: logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.RUN if fanned_out_pipelines: # NOTE: We must model the pipeline relationship in a top-down manner, # meaning each pipeline must point forward to the pipelines that it # fanned out to. The reason is race conditions. If evaluate() # dies early, it may create many unused _PipelineRecord and _SlotRecord # instances that never progress. The only way we know which of these # are valid is by traversing the graph from the root, where the # fanned_out property refers to those pipelines that were run using a # transactional task. child_pipeline_list = list(fanned_out_pipelines) pipeline_record.fanned_out = child_pipeline_list if pipelines_to_run: child_indexes = [ child_pipeline_list.index(p) for p in pipelines_to_run] child_indexes.sort() task = taskqueue.Task( url=self.fanout_handler_path, params=dict(parent_key=str(pipeline_key), child_indexes=child_indexes)) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() if blocking_slot_keys: # NOTE: Always update a generator pipeline's finalization barrier to # include all of the outputs of any pipelines that it runs, to ensure # that finalized calls will not happen until all child pipelines have # completed. This must happen transactionally with the enqueue of # the fan-out kickoff task above to ensure the child output slots and # the barrier blocking slots are the same. barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) finalize_barrier = db.get(barrier_key) if finalize_barrier is None: raise UnexpectedPipelineError( 'Pipeline ID "%s" cannot update finalize barrier. ' 'Does not exist.' % pipeline_key.name()) else: finalize_barrier.blocking_slots = list( blocking_slot_keys.union(set(finalize_barrier.blocking_slots))) finalize_barrier.put() db.run_in_transaction(txn)
python
def transition_run(self, pipeline_key, blocking_slot_keys=None, fanned_out_pipelines=None, pipelines_to_run=None): def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning('Pipeline ID "%s" cannot be marked as run. ' 'Does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status != _PipelineRecord.WAITING: logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.RUN if fanned_out_pipelines: # NOTE: We must model the pipeline relationship in a top-down manner, # meaning each pipeline must point forward to the pipelines that it # fanned out to. The reason is race conditions. If evaluate() # dies early, it may create many unused _PipelineRecord and _SlotRecord # instances that never progress. The only way we know which of these # are valid is by traversing the graph from the root, where the # fanned_out property refers to those pipelines that were run using a # transactional task. child_pipeline_list = list(fanned_out_pipelines) pipeline_record.fanned_out = child_pipeline_list if pipelines_to_run: child_indexes = [ child_pipeline_list.index(p) for p in pipelines_to_run] child_indexes.sort() task = taskqueue.Task( url=self.fanout_handler_path, params=dict(parent_key=str(pipeline_key), child_indexes=child_indexes)) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() if blocking_slot_keys: # NOTE: Always update a generator pipeline's finalization barrier to # include all of the outputs of any pipelines that it runs, to ensure # that finalized calls will not happen until all child pipelines have # completed. This must happen transactionally with the enqueue of # the fan-out kickoff task above to ensure the child output slots and # the barrier blocking slots are the same. barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) finalize_barrier = db.get(barrier_key) if finalize_barrier is None: raise UnexpectedPipelineError( 'Pipeline ID "%s" cannot update finalize barrier. ' 'Does not exist.' % pipeline_key.name()) else: finalize_barrier.blocking_slots = list( blocking_slot_keys.union(set(finalize_barrier.blocking_slots))) finalize_barrier.put() db.run_in_transaction(txn)
[ "def", "transition_run", "(", "self", ",", "pipeline_key", ",", "blocking_slot_keys", "=", "None", ",", "fanned_out_pipelines", "=", "None", ",", "pipelines_to_run", "=", "None", ")", ":", "def", "txn", "(", ")", ":", "pipeline_record", "=", "db", ".", "get"...
Marks an asynchronous or generator pipeline as running. Does nothing if the pipeline is no longer in a runnable state. Args: pipeline_key: The db.Key of the _PipelineRecord to update. blocking_slot_keys: List of db.Key instances that this pipeline's finalization barrier should wait on in addition to the existing one. This is used to update the barrier to include all child outputs. When None, the barrier will not be updated. fanned_out_pipelines: List of db.Key instances of _PipelineRecords that were fanned out by this generator pipeline. This is distinct from the 'pipelines_to_run' list because not all of the pipelines listed here will be immediately ready to execute. When None, then this generator yielded no children. pipelines_to_run: List of db.Key instances of _PipelineRecords that should be kicked off (fan-out) transactionally as part of this transition. When None, no child pipelines will run. All db.Keys in this list must also be present in the fanned_out_pipelines list. Raises: UnexpectedPipelineError if blocking_slot_keys was not empty and the _BarrierRecord has gone missing.
[ "Marks", "an", "asynchronous", "or", "generator", "pipeline", "as", "running", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2437-L2523
7,628
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.transition_complete
def transition_complete(self, pipeline_key): """Marks the given pipeline as complete. Does nothing if the pipeline is no longer in a state that can be completed. Args: pipeline_key: db.Key of the _PipelineRecord that has completed. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to mark pipeline ID "%s" as complete but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to mark pipeline ID "%s" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.DONE pipeline_record.finalized_time = self._gettime() pipeline_record.put() db.run_in_transaction(txn)
python
def transition_complete(self, pipeline_key): def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to mark pipeline ID "%s" as complete but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to mark pipeline ID "%s" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.DONE pipeline_record.finalized_time = self._gettime() pipeline_record.put() db.run_in_transaction(txn)
[ "def", "transition_complete", "(", "self", ",", "pipeline_key", ")", ":", "def", "txn", "(", ")", ":", "pipeline_record", "=", "db", ".", "get", "(", "pipeline_key", ")", "if", "pipeline_record", "is", "None", ":", "logging", ".", "warning", "(", "'Tried t...
Marks the given pipeline as complete. Does nothing if the pipeline is no longer in a state that can be completed. Args: pipeline_key: db.Key of the _PipelineRecord that has completed.
[ "Marks", "the", "given", "pipeline", "as", "complete", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2525-L2551
7,629
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.transition_retry
def transition_retry(self, pipeline_key, retry_message): """Marks the given pipeline as requiring another retry. Does nothing if all attempts have been exceeded. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. retry_message: User-supplied message indicating the reason for the retry. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() params = pipeline_record.params offset_seconds = ( params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt)) pipeline_record.next_retry_time = ( self._gettime() + datetime.timedelta(seconds=offset_seconds)) pipeline_record.current_attempt += 1 pipeline_record.retry_message = retry_message pipeline_record.status = _PipelineRecord.WAITING if pipeline_record.current_attempt >= pipeline_record.max_attempts: root_pipeline_key = ( _PipelineRecord.root_pipeline.get_value_for_datastore( pipeline_record)) logging.warning( 'Giving up on pipeline ID "%s" after %d attempt(s); causing abort ' 'all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name()) # NOTE: We do *not* set the status to aborted here to ensure that # this pipeline will be finalized before it has been marked as aborted. pipeline_record.abort_message = ( 'Aborting after %d attempts' % pipeline_record.current_attempt) task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) else: task = taskqueue.Task( url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target']) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() db.run_in_transaction(txn)
python
def transition_retry(self, pipeline_key, retry_message): def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() params = pipeline_record.params offset_seconds = ( params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt)) pipeline_record.next_retry_time = ( self._gettime() + datetime.timedelta(seconds=offset_seconds)) pipeline_record.current_attempt += 1 pipeline_record.retry_message = retry_message pipeline_record.status = _PipelineRecord.WAITING if pipeline_record.current_attempt >= pipeline_record.max_attempts: root_pipeline_key = ( _PipelineRecord.root_pipeline.get_value_for_datastore( pipeline_record)) logging.warning( 'Giving up on pipeline ID "%s" after %d attempt(s); causing abort ' 'all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name()) # NOTE: We do *not* set the status to aborted here to ensure that # this pipeline will be finalized before it has been marked as aborted. pipeline_record.abort_message = ( 'Aborting after %d attempts' % pipeline_record.current_attempt) task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) else: task = taskqueue.Task( url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target']) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() db.run_in_transaction(txn)
[ "def", "transition_retry", "(", "self", ",", "pipeline_key", ",", "retry_message", ")", ":", "def", "txn", "(", ")", ":", "pipeline_record", "=", "db", ".", "get", "(", "pipeline_key", ")", "if", "pipeline_record", "is", "None", ":", "logging", ".", "warni...
Marks the given pipeline as requiring another retry. Does nothing if all attempts have been exceeded. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. retry_message: User-supplied message indicating the reason for the retry.
[ "Marks", "the", "given", "pipeline", "as", "requiring", "another", "retry", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2553-L2615
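The retry schedule in transition_retry is plain exponential backoff: the delay before the next attempt is backoff_seconds * backoff_factor ** current_attempt, and once current_attempt reaches max_attempts the pipeline gives up and triggers an abort instead. A small sketch of the ETA arithmetic; the parameter values are illustrative, not the library defaults.

import datetime

def next_retry_eta(now, backoff_seconds, backoff_factor, current_attempt):
    # The delay grows geometrically with the attempt number.
    offset_seconds = backoff_seconds * (backoff_factor ** current_attempt)
    return now + datetime.timedelta(seconds=offset_seconds)

now = datetime.datetime(2014, 1, 1)
# backoff_seconds=15, backoff_factor=2 gives delays of 15s, 30s, 60s, ...
etas = [next_retry_eta(now, 15, 2, attempt) for attempt in range(3)]
assert etas[1] - etas[0] == datetime.timedelta(seconds=15)
assert etas[2] - now == datetime.timedelta(seconds=60)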
7,630
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_CallbackHandler.run_callback
def run_callback(self): """Runs the callback for the pipeline specified in the request. Raises: _CallbackTaskError if something was wrong with the request parameters. """ pipeline_id = self.request.get('pipeline_id') if not pipeline_id: raise _CallbackTaskError('"pipeline_id" parameter missing.') pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) pipeline_record = db.get(pipeline_key) if pipeline_record is None: raise _CallbackTaskError( 'Pipeline ID "%s" for callback does not exist.' % pipeline_id) params = pipeline_record.params real_class_path = params['class_path'] try: pipeline_func_class = mr_util.for_name(real_class_path) except ImportError, e: raise _CallbackTaskError( 'Cannot load class named "%s" for pipeline ID "%s".' % (real_class_path, pipeline_id)) if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: if pipeline_func_class.public_callbacks: pass elif pipeline_func_class.admin_callbacks: if not users.is_current_user_admin(): raise _CallbackTaskError( 'Unauthorized callback for admin-only pipeline ID "%s"' % pipeline_id) else: raise _CallbackTaskError( 'External callback for internal-only pipeline ID "%s"' % pipeline_id) kwargs = {} for key in self.request.arguments(): if key != 'pipeline_id': kwargs[str(key)] = self.request.get(key) def perform_callback(): stage = pipeline_func_class.from_id(pipeline_id) if stage is None: raise _CallbackTaskError( 'Pipeline ID "%s" deleted during callback' % pipeline_id) return stage._callback_internal(kwargs) # callback_xg_transaction is a 3-valued setting (None=no trans, # False=1-eg-trans, True=xg-trans) if pipeline_func_class._callback_xg_transaction is not None: transaction_options = db.create_transaction_options( xg=pipeline_func_class._callback_xg_transaction) callback_result = db.run_in_transaction_options(transaction_options, perform_callback) else: callback_result = perform_callback() if callback_result is not None: status_code, content_type, content = callback_result self.response.set_status(status_code) self.response.headers['Content-Type'] = content_type self.response.out.write(content)
python
def run_callback(self): pipeline_id = self.request.get('pipeline_id') if not pipeline_id: raise _CallbackTaskError('"pipeline_id" parameter missing.') pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) pipeline_record = db.get(pipeline_key) if pipeline_record is None: raise _CallbackTaskError( 'Pipeline ID "%s" for callback does not exist.' % pipeline_id) params = pipeline_record.params real_class_path = params['class_path'] try: pipeline_func_class = mr_util.for_name(real_class_path) except ImportError, e: raise _CallbackTaskError( 'Cannot load class named "%s" for pipeline ID "%s".' % (real_class_path, pipeline_id)) if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: if pipeline_func_class.public_callbacks: pass elif pipeline_func_class.admin_callbacks: if not users.is_current_user_admin(): raise _CallbackTaskError( 'Unauthorized callback for admin-only pipeline ID "%s"' % pipeline_id) else: raise _CallbackTaskError( 'External callback for internal-only pipeline ID "%s"' % pipeline_id) kwargs = {} for key in self.request.arguments(): if key != 'pipeline_id': kwargs[str(key)] = self.request.get(key) def perform_callback(): stage = pipeline_func_class.from_id(pipeline_id) if stage is None: raise _CallbackTaskError( 'Pipeline ID "%s" deleted during callback' % pipeline_id) return stage._callback_internal(kwargs) # callback_xg_transaction is a 3-valued setting (None=no trans, # False=1-eg-trans, True=xg-trans) if pipeline_func_class._callback_xg_transaction is not None: transaction_options = db.create_transaction_options( xg=pipeline_func_class._callback_xg_transaction) callback_result = db.run_in_transaction_options(transaction_options, perform_callback) else: callback_result = perform_callback() if callback_result is not None: status_code, content_type, content = callback_result self.response.set_status(status_code) self.response.headers['Content-Type'] = content_type self.response.out.write(content)
[ "def", "run_callback", "(", "self", ")", ":", "pipeline_id", "=", "self", ".", "request", ".", "get", "(", "'pipeline_id'", ")", "if", "not", "pipeline_id", ":", "raise", "_CallbackTaskError", "(", "'\"pipeline_id\" parameter missing.'", ")", "pipeline_key", "=", ...
Runs the callback for the pipeline specified in the request. Raises: _CallbackTaskError if something was wrong with the request parameters.
[ "Runs", "the", "callback", "for", "the", "pipeline", "specified", "in", "the", "request", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2803-L2867
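The HTTP_X_APPENGINE_TASKNAME check in run_callback is how task-queue traffic is told apart from external callers: App Engine sets the X-AppEngine-TaskName header on task dispatches and strips X-AppEngine-* headers from outside requests, so its presence in the WSGI environ can be trusted. A minimal sketch of that guard (the helper name is hypothetical):

def is_task_queue_request(environ):
    # WSGI exposes the X-AppEngine-TaskName header under this environ key.
    return 'HTTP_X_APPENGINE_TASKNAME' in environ

assert is_task_queue_request({'HTTP_X_APPENGINE_TASKNAME': 'task42'})
assert not is_task_queue_request({'HTTP_USER_AGENT': 'curl/7.0'})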
7,631
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/__init__.py
_fix_path

def _fix_path(): """Finds the google_appengine directory and fixes Python imports to use it.""" import os import sys all_paths = os.environ.get('PYTHONPATH').split(os.pathsep) for path_dir in all_paths: dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py') if os.path.exists(dev_appserver_path): logging.debug('Found appengine SDK on path!') google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path)) sys.path.append(google_appengine) # The next import will fix up sys.path even further to bring in # any dependent lib directories that the SDK needs. dev_appserver = __import__('dev_appserver') sys.path.extend(dev_appserver.EXTRA_PATHS) return
python
def _fix_path(): import os import sys all_paths = os.environ.get('PYTHONPATH').split(os.pathsep) for path_dir in all_paths: dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py') if os.path.exists(dev_appserver_path): logging.debug('Found appengine SDK on path!') google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path)) sys.path.append(google_appengine) # The next import will fix up sys.path even further to bring in # any dependent lib directories that the SDK needs. dev_appserver = __import__('dev_appserver') sys.path.extend(dev_appserver.EXTRA_PATHS) return
[ "def", "_fix_path", "(", ")", ":", "import", "os", "import", "sys", "all_paths", "=", "os", ".", "environ", ".", "get", "(", "'PYTHONPATH'", ")", ".", "split", "(", "os", ".", "pathsep", ")", "for", "path_dir", "in", "all_paths", ":", "dev_appserver_path...
Finds the google_appengine directory and fixes Python imports to use it.
[ "Finds", "the", "google_appengine", "directory", "and", "fixes", "Python", "imports", "to", "use", "it", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/__init__.py#L22-L37
7,632
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/models.py
_PipelineRecord.params
def params(self): """Returns the dictionary of parameters for this Pipeline.""" if hasattr(self, '_params_decoded'): return self._params_decoded if self.params_blob is not None: value_encoded = self.params_blob.open().read() else: value_encoded = self.params_text value = json.loads(value_encoded, cls=util.JsonDecoder) if isinstance(value, dict): kwargs = value.get('kwargs') if kwargs: adjusted_kwargs = {} for arg_key, arg_value in kwargs.iteritems(): # Python only allows non-unicode strings as keyword arguments. adjusted_kwargs[str(arg_key)] = arg_value value['kwargs'] = adjusted_kwargs self._params_decoded = value return self._params_decoded
python
def params(self): if hasattr(self, '_params_decoded'): return self._params_decoded if self.params_blob is not None: value_encoded = self.params_blob.open().read() else: value_encoded = self.params_text value = json.loads(value_encoded, cls=util.JsonDecoder) if isinstance(value, dict): kwargs = value.get('kwargs') if kwargs: adjusted_kwargs = {} for arg_key, arg_value in kwargs.iteritems(): # Python only allows non-unicode strings as keyword arguments. adjusted_kwargs[str(arg_key)] = arg_value value['kwargs'] = adjusted_kwargs self._params_decoded = value return self._params_decoded
[ "def", "params", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_params_decoded'", ")", ":", "return", "self", ".", "_params_decoded", "if", "self", ".", "params_blob", "is", "not", "None", ":", "value_encoded", "=", "self", ".", "params_blob"...
Returns the dictionary of parameters for this Pipeline.
[ "Returns", "the", "dictionary", "of", "parameters", "for", "this", "Pipeline", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/models.py#L96-L117
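The str() coercion in params exists because json.loads returns unicode keys, and on Python 2 (which this codebase targets) calling f(**kwargs) with unicode keyword names raises a TypeError. A minimal sketch of the coercion; the helper and sample function are illustrative:

def coerce_kwarg_keys(kwargs):
    # Convert decoded-JSON unicode keys to native strings so f(**kwargs) works.
    return dict((str(key), value) for key, value in kwargs.items())

def greet(name='world'):
    return 'hello %s' % name

decoded = {u'name': u'pipelines'}  # what json.loads hands back on Python 2
assert greet(**coerce_kwarg_keys(decoded)) == 'hello pipelines'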
7,633
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/models.py
_SlotRecord.value
def value(self): """Returns the value of this Slot.""" if hasattr(self, '_value_decoded'): return self._value_decoded if self.value_blob is not None: encoded_value = self.value_blob.open().read() else: encoded_value = self.value_text self._value_decoded = json.loads(encoded_value, cls=util.JsonDecoder) return self._value_decoded
python
def value(self): if hasattr(self, '_value_decoded'): return self._value_decoded if self.value_blob is not None: encoded_value = self.value_blob.open().read() else: encoded_value = self.value_text self._value_decoded = json.loads(encoded_value, cls=util.JsonDecoder) return self._value_decoded
[ "def", "value", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_value_decoded'", ")", ":", "return", "self", ".", "_value_decoded", "if", "self", ".", "value_blob", "is", "not", "None", ":", "encoded_value", "=", "self", ".", "value_blob", "...
Returns the value of this Slot.
[ "Returns", "the", "value", "of", "this", "Slot", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/models.py#L156-L167
7,634
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/models.py
_BarrierIndex.to_barrier_key
def to_barrier_key(cls, barrier_index_key): """Converts a _BarrierIndex key to a _BarrierRecord key. Args: barrier_index_key: db.Key for a _BarrierIndex entity. Returns: db.Key for the corresponding _BarrierRecord entity. """ barrier_index_path = barrier_index_key.to_path() # Pick out the items from the _BarrierIndex key path that we need to # construct the _BarrierRecord key path. (pipeline_kind, dependent_pipeline_id, unused_kind, purpose) = barrier_index_path[-4:] barrier_record_path = ( pipeline_kind, dependent_pipeline_id, _BarrierRecord.kind(), purpose) return db.Key.from_path(*barrier_record_path)
python
def to_barrier_key(cls, barrier_index_key): barrier_index_path = barrier_index_key.to_path() # Pick out the items from the _BarrierIndex key path that we need to # construct the _BarrierRecord key path. (pipeline_kind, dependent_pipeline_id, unused_kind, purpose) = barrier_index_path[-4:] barrier_record_path = ( pipeline_kind, dependent_pipeline_id, _BarrierRecord.kind(), purpose) return db.Key.from_path(*barrier_record_path)
[ "def", "to_barrier_key", "(", "cls", ",", "barrier_index_key", ")", ":", "barrier_index_path", "=", "barrier_index_key", ".", "to_path", "(", ")", "# Pick out the items from the _BarrierIndex key path that we need to", "# construct the _BarrierRecord key path.", "(", "pipeline_ki...
Converts a _BarrierIndex key to a _BarrierRecord key. Args: barrier_index_key: db.Key for a _BarrierIndex entity. Returns: db.Key for the corresponding _BarrierRecord entity.
[ "Converts", "a", "_BarrierIndex", "key", "to", "a", "_BarrierRecord", "key", "." ]
277394648dac3e8214677af898935d07399ac8e1
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/models.py#L251-L271
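to_barrier_key is the inverse of the index-path construction sketched after _create_barrier_entities above: the last four elements of a _BarrierIndex key path are (pipeline kind, pipeline id, index kind, purpose), and the barrier record path is rebuilt by swapping in the _BarrierRecord kind. A plain-tuple sketch of the slicing:

def to_barrier_record_path(barrier_index_path):
    pipeline_kind, pipeline_id, _unused_index_kind, purpose = barrier_index_path[-4:]
    return (pipeline_kind, pipeline_id, '_BarrierRecord', purpose)

index_path = ('_SlotRecord', 'slot-1',
              '_PipelineRecord', 'pipe-9',
              '_BarrierIndex', 'finalize')
assert to_barrier_record_path(index_path) == (
    '_PipelineRecord', 'pipe-9', '_BarrierRecord', 'finalize')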
7,635
bcbnz/pylabels
labels/sheet.py
Sheet.partial_page
def partial_page(self, page, used_labels): """Allows a page to be marked as already partially used so you can generate a PDF to print on the remaining labels. Parameters ---------- page: positive integer The page number to mark as partially used. The page must not have already been started, i.e., for page 1 this must be called before any labels have been started, for page 2 this must be called before the first page is full and so on. used_labels: iterable An iterable of (row, column) pairs marking which labels have been used already. The rows and columns must be within the bounds of the sheet. """ # Check the page number is valid. if page <= self.page_count: raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page)) # Add these to any existing labels marked as used. used = self._used.get(page, set()) for row, column in used_labels: # Check the index is valid. if row < 1 or row > self.specs.rows: raise IndexError("Invalid row number: {0:d}.".format(row)) if column < 1 or column > self.specs.columns: raise IndexError("Invalid column number: {0:d}.".format(column)) # Add it. used.add((int(row), int(column))) # Save the details. self._used[page] = used
python
def partial_page(self, page, used_labels): # Check the page number is valid. if page <= self.page_count: raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page)) # Add these to any existing labels marked as used. used = self._used.get(page, set()) for row, column in used_labels: # Check the index is valid. if row < 1 or row > self.specs.rows: raise IndexError("Invalid row number: {0:d}.".format(row)) if column < 1 or column > self.specs.columns: raise IndexError("Invalid column number: {0:d}.".format(column)) # Add it. used.add((int(row), int(column))) # Save the details. self._used[page] = used
[ "def", "partial_page", "(", "self", ",", "page", ",", "used_labels", ")", ":", "# Check the page number is valid.", "if", "page", "<=", "self", ".", "page_count", ":", "raise", "ValueError", "(", "\"Page {0:d} has already started, cannot mark used labels now.\"", ".", "...
Allows a page to be marked as already partially used so you can generate a PDF to print on the remaining labels. Parameters ---------- page: positive integer The page number to mark as partially used. The page must not have already been started, i.e., for page 1 this must be called before any labels have been started, for page 2 this must be called before the first page is full and so on. used_labels: iterable An iterable of (row, column) pairs marking which labels have been used already. The rows and columns must be within the bounds of the sheet.
[ "Allows", "a", "page", "to", "be", "marked", "as", "already", "partially", "used", "so", "you", "can", "generate", "a", "PDF", "to", "print", "on", "the", "remaining", "labels", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L205-L239
7,636
bcbnz/pylabels
labels/sheet.py
Sheet._new_page
def _new_page(self): """Helper function to start a new page. Not intended for external use. """ self._current_page = Drawing(*self._pagesize) if self._bgimage: self._current_page.add(self._bgimage) self._pages.append(self._current_page) self.page_count += 1 self._position = [1, 0]
python
def _new_page(self): self._current_page = Drawing(*self._pagesize) if self._bgimage: self._current_page.add(self._bgimage) self._pages.append(self._current_page) self.page_count += 1 self._position = [1, 0]
[ "def", "_new_page", "(", "self", ")", ":", "self", ".", "_current_page", "=", "Drawing", "(", "*", "self", ".", "_pagesize", ")", "if", "self", ".", "_bgimage", ":", "self", ".", "_current_page", ".", "add", "(", "self", ".", "_bgimage", ")", "self", ...
Helper function to start a new page. Not intended for external use.
[ "Helper", "function", "to", "start", "a", "new", "page", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L241-L250
7,637
bcbnz/pylabels
labels/sheet.py
Sheet._next_label
def _next_label(self): """Helper method to move to the next label. Not intended for external use. This does not increment the label_count attribute as the next label may not be usable (it may have been marked as missing through partial_pages). See _next_unused_label for a generally more useful method. """ # Special case for the very first label. if self.page_count == 0: self._new_page() # Filled up a page. elif self._position == self._numlabels: self._new_page() # Filled up a row. elif self._position[1] == self.specs.columns: self._position[0] += 1 self._position[1] = 0 # Move to the next column. self._position[1] += 1
python
def _next_label(self): # Special case for the very first label. if self.page_count == 0: self._new_page() # Filled up a page. elif self._position == self._numlabels: self._new_page() # Filled up a row. elif self._position[1] == self.specs.columns: self._position[0] += 1 self._position[1] = 0 # Move to the next column. self._position[1] += 1
[ "def", "_next_label", "(", "self", ")", ":", "# Special case for the very first label.", "if", "self", ".", "page_count", "==", "0", ":", "self", ".", "_new_page", "(", ")", "# Filled up a page.", "elif", "self", ".", "_position", "==", "self", ".", "_numlabels"...
Helper method to move to the next label. Not intended for external use. This does not increment the label_count attribute as the next label may not be usable (it may have been marked as missing through partial_pages). See _next_unused_label for a generally more useful method.
[ "Helper", "method", "to", "move", "to", "the", "next", "label", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L252-L274
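_next_label walks positions column-by-column, then row-by-row, then page-by-page, with row and column both 1-based. A generator sketch of that traversal order (a hypothetical helper, not part of pylabels):

def label_positions(rows, columns):
    # Yields (page, row, column) in the order _next_label visits them.
    page = 0
    while True:
        page += 1
        for row in range(1, rows + 1):
            for column in range(1, columns + 1):
                yield page, row, column

gen = label_positions(rows=2, columns=3)
first_seven = [next(gen) for _ in range(7)]
assert first_seven[0] == (1, 1, 1)
assert first_seven[5] == (1, 2, 3)  # last label on page 1
assert first_seven[6] == (2, 1, 1)  # rolls over to a new page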
7,638
bcbnz/pylabels
labels/sheet.py
Sheet._next_unused_label
def _next_unused_label(self): """Helper method to move to the next unused label. Not intended for external use. This method will shade in any missing labels if desired, and will increment the label_count attribute once a suitable label position has been found. """ self._next_label() # This label may be missing. if self.page_count in self._used: # Keep trying while the label is missing. missing = self._used.get(self.page_count, set()) while tuple(self._position) in missing: # Throw the missing information away now we have used it. This # allows the _shade_remaining_missing method to work. missing.discard(tuple(self._position)) # Shade the missing label if desired. if self.shade_missing: self._shade_missing_label() # Try our luck with the next label. self._next_label() missing = self._used.get(self.page_count, set()) # Increment the count now we have found a suitable position. self.label_count += 1
python
def _next_unused_label(self): self._next_label() # This label may be missing. if self.page_count in self._used: # Keep trying while the label is missing. missing = self._used.get(self.page_count, set()) while tuple(self._position) in missing: # Throw the missing information away now we have used it. This # allows the _shade_remaining_missing method to work. missing.discard(tuple(self._position)) # Shade the missing label if desired. if self.shade_missing: self._shade_missing_label() # Try our luck with the next label. self._next_label() missing = self._used.get(self.page_count, set()) # Increment the count now we have found a suitable position. self.label_count += 1
[ "def", "_next_unused_label", "(", "self", ")", ":", "self", ".", "_next_label", "(", ")", "# This label may be missing.", "if", "self", ".", "page_count", "in", "self", ".", "_used", ":", "# Keep try while the label is missing.", "missing", "=", "self", ".", "_use...
Helper method to move to the next unused label. Not intended for external use. This method will shade in any missing labels if desired, and will increment the label_count attribute once a suitable label position has been found.
[ "Helper", "method", "to", "move", "to", "the", "next", "unused", "label", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L276-L304
7,639
bcbnz/pylabels
labels/sheet.py
Sheet._calculate_edges
def _calculate_edges(self): """Calculate edges of the current label. Not intended for external use. """ # Calculate the left edge of the label. left = self.specs.left_margin left += (self.specs.label_width * (self._position[1] - 1)) if self.specs.column_gap: left += (self.specs.column_gap * (self._position[1] - 1)) left *= mm # And the bottom. bottom = self.specs.sheet_height - self.specs.top_margin bottom -= (self.specs.label_height * self._position[0]) if self.specs.row_gap: bottom -= (self.specs.row_gap * (self._position[0] - 1)) bottom *= mm # Done. return float(left), float(bottom)
python
def _calculate_edges(self): # Calculate the left edge of the label. left = self.specs.left_margin left += (self.specs.label_width * (self._position[1] - 1)) if self.specs.column_gap: left += (self.specs.column_gap * (self._position[1] - 1)) left *= mm # And the bottom. bottom = self.specs.sheet_height - self.specs.top_margin bottom -= (self.specs.label_height * self._position[0]) if self.specs.row_gap: bottom -= (self.specs.row_gap * (self._position[0] - 1)) bottom *= mm # Done. return float(left), float(bottom)
[ "def", "_calculate_edges", "(", "self", ")", ":", "# Calculate the left edge of the label.", "left", "=", "self", ".", "specs", ".", "left_margin", "left", "+=", "(", "self", ".", "specs", ".", "label_width", "*", "(", "self", ".", "_position", "[", "1", "]"...
Calculate edges of the current label. Not intended for external use.
[ "Calculate", "edges", "of", "the", "current", "label", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L306-L326
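The edge arithmetic in _calculate_edges is easy to verify by hand: the left edge accumulates the left margin plus one label width and one column gap per column already passed, and the bottom edge is measured down from the sheet top by one label height per row plus the gaps between rows (the real method then scales by mm for ReportLab points). A sketch in raw sheet units with a made-up spec, not a real Avery layout:

def label_edges(specs, row, column):
    left = (specs['left_margin']
            + specs['label_width'] * (column - 1)
            + specs.get('column_gap', 0) * (column - 1))
    bottom = (specs['sheet_height'] - specs['top_margin']
              - specs['label_height'] * row
              - specs.get('row_gap', 0) * (row - 1))
    return left, bottom

specs = {'sheet_height': 297.0, 'left_margin': 10.0, 'top_margin': 12.0,
         'label_width': 90.0, 'label_height': 65.0,
         'column_gap': 10.0, 'row_gap': 5.0}
assert label_edges(specs, row=1, column=1) == (10.0, 220.0)
assert label_edges(specs, row=2, column=2) == (110.0, 150.0)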
7,640
bcbnz/pylabels
labels/sheet.py
Sheet._shade_missing_label
def _shade_missing_label(self): """Helper method to shade a missing label. Not intended for external use. """ # Start a drawing for the whole label. label = Drawing(float(self._lw), float(self._lh)) label.add(self._clip_label) # Fill with a rectangle; the clipping path will take care of the borders. r = shapes.Rect(0, 0, float(self._lw), float(self._lh)) r.fillColor = self.shade_missing r.strokeColor = None label.add(r) # Add the label to the page. label.shift(*self._calculate_edges()) self._current_page.add(label)
python
def _shade_missing_label(self): # Start a drawing for the whole label. label = Drawing(float(self._lw), float(self._lh)) label.add(self._clip_label) # Fill with a rectangle; the clipping path will take care of the borders. r = shapes.Rect(0, 0, float(self._lw), float(self._lh)) r.fillColor = self.shade_missing r.strokeColor = None label.add(r) # Add the label to the page. label.shift(*self._calculate_edges()) self._current_page.add(label)
[ "def", "_shade_missing_label", "(", "self", ")", ":", "# Start a drawing for the whole label.", "label", "=", "Drawing", "(", "float", "(", "self", ".", "_lw", ")", ",", "float", "(", "self", ".", "_lh", ")", ")", "label", ".", "add", "(", "self", ".", "...
Helper method to shade a missing label. Not intended for external use.
[ "Helper", "method", "to", "shade", "a", "missing", "label", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L328-L344
7,641
bcbnz/pylabels
labels/sheet.py
Sheet._shade_remaining_missing
def _shade_remaining_missing(self): """Helper method to shade any missing labels remaining on the current page. Not intended for external use. Note that this will modify the internal _position attribute and should therefore only be used once all the 'real' labels have been drawn. """ # Sanity check. if not self.shade_missing: return # Run through each missing label left in the current page and shade it. missing = self._used.get(self.page_count, set()) for position in missing: self._position = position self._shade_missing_label()
python
def _shade_remaining_missing(self): # Sanity check. if not self.shade_missing: return # Run through each missing label left in the current page and shade it. missing = self._used.get(self.page_count, set()) for position in missing: self._position = position self._shade_missing_label()
[ "def", "_shade_remaining_missing", "(", "self", ")", ":", "# Sanity check.", "if", "not", "self", ".", "shade_missing", ":", "return", "# Run through each missing label left in the current page and shade it.", "missing", "=", "self", ".", "_used", ".", "get", "(", "self...
Helper method to shade any missing labels remaining on the current page. Not intended for external use. Note that this will modify the internal _position attribute and should therefore only be used once all the 'real' labels have been drawn.
[ "Helper", "method", "to", "shade", "any", "missing", "labels", "remaining", "on", "the", "current", "page", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L346-L362
7,642
bcbnz/pylabels
labels/sheet.py
Sheet._draw_label
def _draw_label(self, obj, count): """Helper method to draw on the current label. Not intended for external use. """ # Start a drawing for the whole label. label = Drawing(float(self._lw), float(self._lh)) label.add(self._clip_label) # And one for the available area (i.e., after padding). available = Drawing(float(self._dw), float(self._dh)) available.add(self._clip_drawing) # Call the drawing function. self.drawing_callable(available, float(self._dw), float(self._dh), obj) # Render the contents on the label. available.shift(float(self._lp), float(self._bp)) label.add(available) # Draw the border if requested. if self.border: label.add(self._border) # Add however many copies we need to. for i in range(count): # Find the next available label. self._next_unused_label() # Have we been told to skip this page? if self.pages_to_draw and self.page_count not in self.pages_to_draw: continue # Add the label to the page. ReportLab stores the added drawing by # reference so we have to copy it N times. thislabel = copy(label) thislabel.shift(*self._calculate_edges()) self._current_page.add(thislabel)
python
def _draw_label(self, obj, count): # Start a drawing for the whole label. label = Drawing(float(self._lw), float(self._lh)) label.add(self._clip_label) # And one for the available area (i.e., after padding). available = Drawing(float(self._dw), float(self._dh)) available.add(self._clip_drawing) # Call the drawing function. self.drawing_callable(available, float(self._dw), float(self._dh), obj) # Render the contents on the label. available.shift(float(self._lp), float(self._bp)) label.add(available) # Draw the border if requested. if self.border: label.add(self._border) # Add however many copies we need to. for i in range(count): # Find the next available label. self._next_unused_label() # Have we been told to skip this page? if self.pages_to_draw and self.page_count not in self.pages_to_draw: continue # Add the label to the page. ReportLab stores the added drawing by # reference so we have to copy it N times. thislabel = copy(label) thislabel.shift(*self._calculate_edges()) self._current_page.add(thislabel)
[ "def", "_draw_label", "(", "self", ",", "obj", ",", "count", ")", ":", "# Start a drawing for the whole label.", "label", "=", "Drawing", "(", "float", "(", "self", ".", "_lw", ")", ",", "float", "(", "self", ".", "_lh", ")", ")", "label", ".", "add", ...
Helper method to draw on the current label. Not intended for external use.
[ "Helper", "method", "to", "draw", "on", "the", "current", "label", ".", "Not", "intended", "for", "external", "use", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L364-L400
7,643
bcbnz/pylabels
labels/sheet.py
Sheet.add_labels
def add_labels(self, objects, count=1): """Add multiple labels to the sheet. Parameters ---------- objects: iterable An iterable of the objects to add. Each of these will be passed to the add_label method. Note that if this is a generator it will be consumed. count: positive integer or iterable of positive integers, default 1 The number of copies of each label to add. If a single integer, that many copies of every label are added. If an iterable, then each value specifies how many copies of the corresponding label to add. The iterables are advanced in parallel until one is exhausted; extra values in the other one are ignored. This means that if there are fewer count entries than objects, the objects corresponding to the missing counts will not be added to the sheet. Note that if this is a generator it will be consumed. Also note that the drawing function will only be called once for each label and the results copied for the repeats. If the drawing function maintains any state internally then using this parameter may break it. """ # If we can convert it to an int, do so and use the itertools.repeat() # method to create an infinite iterator from it. Otherwise, assume it # is an iterable or sequence. try: count = int(count) except TypeError: pass else: count = repeat(count) # If it is not an iterable (e.g., a list or range object), # create an iterator over it. if not hasattr(count, 'next') and not hasattr(count, '__next__'): count = iter(count) # Go through the objects. for obj in objects: # Check we have a count for this one. try: thiscount = next(count) except StopIteration: break # Draw it. self._draw_label(obj, thiscount)
python
def add_labels(self, objects, count=1): # If we can convert it to an int, do so and use the itertools.repeat() # method to create an infinite iterator from it. Otherwise, assume it # is an iterable or sequence. try: count = int(count) except TypeError: pass else: count = repeat(count) # If it is not an iterable (e.g., a list or range object), # create an iterator over it. if not hasattr(count, 'next') and not hasattr(count, '__next__'): count = iter(count) # Go through the objects. for obj in objects: # Check we have a count for this one. try: thiscount = next(count) except StopIteration: break # Draw it. self._draw_label(obj, thiscount)
[ "def", "add_labels", "(", "self", ",", "objects", ",", "count", "=", "1", ")", ":", "# If we can convert it to an int, do so and use the itertools.repeat()", "# method to create an infinite iterator from it. Otherwise, assume it", "# is an iterable or sequence.", "try", ":", "count...
Add multiple labels to the sheet. Parameters ---------- objects: iterable An iterable of the objects to add. Each of these will be passed to the add_label method. Note that if this is a generator it will be consumed. count: positive integer or iterable of positive integers, default 1 The number of copies of each label to add. If a single integer, that many copies of every label are added. If an iterable, then each value specifies how many copies of the corresponding label to add. The iterables are advanced in parallel until one is exhausted; extra values in the other one are ignored. This means that if there are fewer count entries than objects, the objects corresponding to the missing counts will not be added to the sheet. Note that if this is a generator it will be consumed. Also note that the drawing function will only be called once for each label and the results copied for the repeats. If the drawing function maintains any state internally then using this parameter may break it.
[ "Add", "multiple", "labels", "to", "the", "sheet", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L419-L468
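A minimal end-to-end sketch of how add_labels is typically driven. The Specification and Sheet constructor arguments shown (sheet size, grid, and label size in millimetres) are assumptions inferred from the attributes used in this file; treat the exact signatures as unconfirmed.
import labels  # the pylabels package
from reportlab.graphics import shapes

# Assumed: A4 sheet (210 x 297 mm) with a 2 x 8 grid of 90 x 25 mm labels.
specs = labels.Specification(210, 297, 2, 8, 90, 25)

def draw_name(label, width, height, obj):
    # The drawing callable receives a Drawing clipped to the usable label area.
    label.add(shapes.String(2, 2, str(obj), fontName='Helvetica'))

sheet = labels.Sheet(specs, draw_name, border=True)

names = ['Alice', 'Bob', 'Carol']
sheet.add_labels(names, count=[1, 3, 2])  # 1 Alice, 3 Bobs, 2 Carols
sheet.add_labels(names, count=2)          # two copies of every name
sheet.add_labels(names, count=[5])        # counts exhausted early: only Alice added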
7,644
bcbnz/pylabels
labels/sheet.py
Sheet.save
def save(self, filelike): """Save the file as a PDF. Parameters ---------- filelike: path or file-like object The filename or file-like object to save the labels under. Any existing contents will be overwritten. """ # Shade any remaining missing labels if desired. self._shade_remaining_missing() # Create a canvas. canvas = Canvas(filelike, pagesize=self._pagesize) # Render each created page onto the canvas. for page in self._pages: renderPDF.draw(page, canvas, 0, 0) canvas.showPage() # Done. canvas.save()
python
def save(self, filelike): # Shade any remaining missing labels if desired. self._shade_remaining_missing() # Create a canvas. canvas = Canvas(filelike, pagesize=self._pagesize) # Render each created page onto the canvas. for page in self._pages: renderPDF.draw(page, canvas, 0, 0) canvas.showPage() # Done. canvas.save()
[ "def", "save", "(", "self", ",", "filelike", ")", ":", "# Shade any remaining missing labels if desired.", "self", ".", "_shade_remaining_missing", "(", ")", "# Create a canvas.", "canvas", "=", "Canvas", "(", "filelike", ",", "pagesize", "=", "self", ".", "_pagesiz...
Save the file as a PDF. Parameters ---------- filelike: path or file-like object The filename or file-like object to save the labels under. Any existing contents will be overwritten.
[ "Save", "the", "file", "as", "a", "PDF", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L470-L492
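save accepts a path or a file-like object; continuing the sketch above, an in-memory buffer works too.
import io

sheet.save('labels.pdf')   # write straight to disk

buf = io.BytesIO()         # or render into memory, e.g. for an HTTP response
sheet.save(buf)
pdf_bytes = buf.getvalue()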
7,645
bcbnz/pylabels
labels/sheet.py
Sheet.preview
def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF): """Render a preview image of a page. Parameters ---------- page: positive integer Which page to render. Must be in the range [1, page_count] filelike: path or file-like object Can be a filename as a string, a Python file object, or something which behaves like a Python file object. For example, if you were using the Django web framework, an HttpResponse object could be passed to render the preview to the browser (as long as you remember to set the mimetype of the response). If you pass a filename, the existing contents will be overwritten. format: string The image format to use for the preview. ReportLab uses the Python Imaging Library (PIL) internally, so any PIL format should be supported. dpi: positive real The dots-per-inch to use when rendering. background_colour: Hex colour specification What color background to use. Notes ----- If you are creating this sheet for a preview only, you can pass the pages_to_draw parameter to the constructor to avoid the drawing function being called for all the labels on pages you'll never look at. If you preview a page you did not tell the sheet to draw, you will get a blank image. Raises ------ ValueError: If the page number is not valid. """ # Check the page number. if page < 1 or page > self.page_count: raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count)) # Shade any remaining missing labels if desired. self._shade_remaining_missing() # Rendering to an image (as opposed to a PDF) requires any background # to have an integer width and height if it is a ReportLab Image # object. Drawing objects are exempt from this. oldw, oldh = None, None if isinstance(self._bgimage, Image): oldw, oldh = self._bgimage.width, self._bgimage.height self._bgimage.width = int(oldw) + 1 self._bgimage.height = int(oldh) + 1 # Let ReportLab do the heavy lifting. renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour) # Restore the size of the background image if we changed it. if oldw: self._bgimage.width = oldw self._bgimage.height = oldh
python
def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF): # Check the page number. if page < 1 or page > self.page_count: raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count)) # Shade any remaining missing labels if desired. self._shade_remaining_missing() # Rendering to an image (as opposed to a PDF) requires any background # to have an integer width and height if it is a ReportLab Image # object. Drawing objects are exempt from this. oldw, oldh = None, None if isinstance(self._bgimage, Image): oldw, oldh = self._bgimage.width, self._bgimage.height self._bgimage.width = int(oldw) + 1 self._bgimage.height = int(oldh) + 1 # Let ReportLab do the heavy lifting. renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour) # Restore the size of the background image if we changed it. if oldw: self._bgimage.width = oldw self._bgimage.height = oldh
[ "def", "preview", "(", "self", ",", "page", ",", "filelike", ",", "format", "=", "'png'", ",", "dpi", "=", "72", ",", "background_colour", "=", "0xFFFFFF", ")", ":", "# Check the page number.", "if", "page", "<", "1", "or", "page", ">", "self", ".", "p...
Render a preview image of a page. Parameters ---------- page: positive integer Which page to render. Must be in the range [1, page_count] filelike: path or file-like object Can be a filename as a string, a Python file object, or something which behaves like a Python file object. For example, if you were using the Django web framework, an HttpResponse object could be passed to render the preview to the browser (as long as you remember to set the mimetype of the response). If you pass a filename, the existing contents will be overwritten. format: string The image format to use for the preview. ReportLab uses the Python Imaging Library (PIL) internally, so any PIL format should be supported. dpi: positive real The dots-per-inch to use when rendering. background_colour: Hex colour specification What color background to use. Notes ----- If you are creating this sheet for a preview only, you can pass the pages_to_draw parameter to the constructor to avoid the drawing function being called for all the labels on pages you'll never look at. If you preview a page you did not tell the sheet to draw, you will get a blank image. Raises ------ ValueError: If the page number is not valid.
[ "Render", "a", "preview", "image", "of", "a", "page", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L494-L553
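A short usage sketch for preview, continuing the same sheet; page numbers are 1-based and out-of-range values raise ValueError.
sheet.preview(1, 'page1.png', format='png', dpi=150)  # first page at 150 dpi

try:
    sheet.preview(sheet.page_count + 1, 'nope.png')
except ValueError as err:
    print(err)  # "Invalid page number; should be between 1 and N."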
7,646
bcbnz/pylabels
labels/sheet.py
Sheet.preview_string
def preview_string(self, page, format='png', dpi=72, background_colour=0xFFFFFF): """Render a preview image of a page as a string. Parameters ---------- page: positive integer Which page to render. Must be in the range [1, page_count] format: string The image format to use for the preview. ReportLab uses the Python Imaging Library (PIL) internally, so any PIL format should be supported. dpi: positive real The dots-per-inch to use when rendering. background_colour: Hex colour specification What color background to use. Notes ----- If you are creating this sheet for a preview only, you can pass the pages_to_draw parameter to the constructor to avoid the drawing function being called for all the labels on pages you'll never look at. If you preview a page you did not tell the sheet to draw, you will get a blank image. Raises ------ ValueError: If the page number is not valid. """ # Check the page number. if page < 1 or page > self.page_count: raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count)) # Shade any remaining missing labels if desired. self._shade_remaining_missing() # Rendering to an image (as opposed to a PDF) requires any background # to have an integer width and height if it is a ReportLab Image # object. Drawing objects are exempt from this. oldw, oldh = None, None if isinstance(self._bgimage, Image): oldw, oldh = self._bgimage.width, self._bgimage.height self._bgimage.width = int(oldw) + 1 self._bgimage.height = int(oldh) + 1 # Let ReportLab do the heavy lifting. s = renderPM.drawToString(self._pages[page-1], format, dpi, background_colour) # Restore the size of the background image if we changed it. if oldw: self._bgimage.width = oldw self._bgimage.height = oldh # Done. return s
python
def preview_string(self, page, format='png', dpi=72, background_colour=0xFFFFFF): # Check the page number. if page < 1 or page > self.page_count: raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count)) # Shade any remaining missing labels if desired. self._shade_remaining_missing() # Rendering to an image (as opposed to a PDF) requires any background # to have an integer width and height if it is a ReportLab Image # object. Drawing objects are exempt from this. oldw, oldh = None, None if isinstance(self._bgimage, Image): oldw, oldh = self._bgimage.width, self._bgimage.height self._bgimage.width = int(oldw) + 1 self._bgimage.height = int(oldh) + 1 # Let ReportLab do the heavy lifting. s = renderPM.drawToString(self._pages[page-1], format, dpi, background_colour) # Restore the size of the background image if we changed it. if oldw: self._bgimage.width = oldw self._bgimage.height = oldh # Done. return s
[ "def", "preview_string", "(", "self", ",", "page", ",", "format", "=", "'png'", ",", "dpi", "=", "72", ",", "background_colour", "=", "0xFFFFFF", ")", ":", "# Check the page number.", "if", "page", "<", "1", "or", "page", ">", "self", ".", "page_count", ...
Render a preview image of a page as a string. Parameters ---------- page: positive integer Which page to render. Must be in the range [1, page_count] format: string The image format to use for the preview. ReportLab uses the Python Imaging Library (PIL) internally, so any PIL format should be supported. dpi: positive real The dots-per-inch to use when rendering. background_colour: Hex colour specification What color background to use. Notes ----- If you are creating this sheet for a preview only, you can pass the pages_to_draw parameter to the constructor to avoid the drawing function being called for all the labels on pages you'll never look at. If you preview a page you did not tell the sheet to draw, you will get a blank image. Raises ------ ValueError: If the page number is not valid.
[ "Render", "a", "preview", "image", "of", "a", "page", "as", "a", "string", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L555-L610
7,647
bcbnz/pylabels
labels/specifications.py
Specification.bounding_boxes
def bounding_boxes(self, mode='fraction', output='dict'): """Get the bounding boxes of the labels on a page. Parameters ---------- mode: 'fraction', 'actual' If 'fraction', the bounding boxes are expressed as a fraction of the height and width of the sheet. If 'actual', they are the actual position of the labels in millimetres from the top-left of the sheet. output: 'dict', 'json' If 'dict', a dictionary with label identifier tuples (row, column) as keys and a dictionary with 'left', 'right', 'top', and 'bottom' entries as the values. If 'json', a JSON encoded string which represents a dictionary with keys of the string format 'rowxcolumn' and each value being a bounding box dictionary with 'left', 'right', 'top', and 'bottom' entries. Returns ------- The bounding boxes in the format set by the output parameter. """ boxes = {} # Check the parameters. if mode not in ('fraction', 'actual'): raise ValueError("Unknown mode {0}.".format(mode)) if output not in ('dict', 'json'): raise ValueError("Unknown output {0}.".format(output)) # Iterate over the rows. for row in range(1, self.rows + 1): # Top and bottom of all labels in the row. top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap)) bottom = top + self.label_height # Now iterate over all columns in this row. for column in range(1, self.columns + 1): # Left and right position of this column. left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap)) right = left + self.label_width # Output in the appropriate mode format. if mode == 'fraction': box = { 'top': top / self.sheet_height, 'bottom': bottom / self.sheet_height, 'left': left / self.sheet_width, 'right': right / self.sheet_width, } elif mode == 'actual': box = {'top': top, 'bottom': bottom, 'left': left, 'right': right} # Add to the collection. if output == 'json': boxes['{0:d}x{1:d}'.format(row, column)] = box box['top'] = float(box['top']) box['bottom'] = float(box['bottom']) box['left'] = float(box['left']) box['right'] = float(box['right']) else: boxes[(row, column)] = box # Done. if output == 'json': return json.dumps(boxes) return boxes
python
def bounding_boxes(self, mode='fraction', output='dict'): boxes = {} # Check the parameters. if mode not in ('fraction', 'actual'): raise ValueError("Unknown mode {0}.".format(mode)) if output not in ('dict', 'json'): raise ValueError("Unknown output {0}.".format(output)) # Iterate over the rows. for row in range(1, self.rows + 1): # Top and bottom of all labels in the row. top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap)) bottom = top + self.label_height # Now iterate over all columns in this row. for column in range(1, self.columns + 1): # Left and right position of this column. left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap)) right = left + self.label_width # Output in the appropriate mode format. if mode == 'fraction': box = { 'top': top / self.sheet_height, 'bottom': bottom / self.sheet_height, 'left': left / self.sheet_width, 'right': right / self.sheet_width, } elif mode == 'actual': box = {'top': top, 'bottom': bottom, 'left': left, 'right': right} # Add to the collection. if output == 'json': boxes['{0:d}x{1:d}'.format(row, column)] = box box['top'] = float(box['top']) box['bottom'] = float(box['bottom']) box['left'] = float(box['left']) box['right'] = float(box['right']) else: boxes[(row, column)] = box # Done. if output == 'json': return json.dumps(boxes) return boxes
[ "def", "bounding_boxes", "(", "self", ",", "mode", "=", "'fraction'", ",", "output", "=", "'dict'", ")", ":", "boxes", "=", "{", "}", "# Check the parameters.", "if", "mode", "not", "in", "(", "'fraction'", ",", "'actual'", ")", ":", "raise", "ValueError",...
Get the bounding boxes of the labels on a page. Parameters ---------- mode: 'fraction', 'actual' If 'fraction', the bounding boxes are expressed as a fraction of the height and width of the sheet. If 'actual', they are the actual position of the labels in millimetres from the top-left of the sheet. output: 'dict', 'json' If 'dict', a dictionary with label identifier tuples (row, column) as keys and a dictionary with 'left', 'right', 'top', and 'bottom' entries as the values. If 'json', a JSON encoded string which represents a dictionary with keys of the string format 'rowxcolumn' and each value being a bounding box dictionary with 'left', 'right', 'top', and 'bottom' entries. Returns ------- The bounding boxes in the format set by the output parameter.
[ "Get", "the", "bounding", "boxes", "of", "the", "labels", "on", "a", "page", "." ]
ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/specifications.py#L257-L325
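A sketch of both output modes of bounding_boxes, reusing the specs object from the earlier sketch.
import json

actual = specs.bounding_boxes(mode='actual', output='dict')
# Keys are (row, column) tuples; values are mm offsets from the sheet's top-left.
print(actual[(1, 1)])

fractions = json.loads(specs.bounding_boxes(mode='fraction', output='json'))
print(fractions['1x1']['left'])  # 'rowxcolumn' string keys, float values in [0, 1]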
7,648
estnltk/estnltk
estnltk/wiki/parser.py
templatesCollector
def templatesCollector(text, open, close): """leaves related articles and wikitables in place""" others = [] spans = [i for i in findBalanced(text, open, close)] spanscopy = copy(spans) for i in range(len(spans)): start, end = spans[i] o = text[start:end] ol = o.lower() if 'vaata|' in ol or 'wikitable' in ol: spanscopy.remove(spans[i]) continue others.append(o) text = dropSpans(spanscopy, text) return text, others
python
def templatesCollector(text, open, close): others = [] spans = [i for i in findBalanced(text, open, close)] spanscopy = copy(spans) for i in range(len(spans)): start, end = spans[i] o = text[start:end] ol = o.lower() if 'vaata|' in ol or 'wikitable' in ol: spanscopy.remove(spans[i]) continue others.append(o) text = dropSpans(spanscopy, text) return text, others
[ "def", "templatesCollector", "(", "text", ",", "open", ",", "close", ")", ":", "others", "=", "[", "]", "spans", "=", "[", "i", "for", "i", "in", "findBalanced", "(", "text", ",", "open", ",", "close", ")", "]", "spanscopy", "=", "copy", "(", "span...
leaves related articles and wikitables in place
[ "leaves", "related", "articles", "and", "wikitables", "in", "place" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/parser.py#L66-L82
7,649
estnltk/estnltk
estnltk/prettyprinter/prettyprinter.py
assert_legal_arguments
def assert_legal_arguments(kwargs): """Assert that PrettyPrinter arguments are correct. Raises ------ ValueError In case there are unknown arguments or a single layer is mapped to more than one aesthetic. """ seen_layers = set() for k, v in kwargs.items(): if k not in LEGAL_ARGUMENTS: raise ValueError('Illegal argument <{0}>!'.format(k)) if k in AESTHETICS: if v in seen_layers: raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v)) seen_layers.add(v) if k in VALUES: if not isinstance(v, six.string_types) and not isinstance(v, list): raise ValueError('Value <{0}> must be either string or list'.format(k)) if isinstance(v, list): if len(v) == 0: raise ValueError('Rules cannot be empty list') for rule_matcher, rule_value in v: if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types): raise ValueError('Rule tuple elements must be strings')
python
def assert_legal_arguments(kwargs): seen_layers = set() for k, v in kwargs.items(): if k not in LEGAL_ARGUMENTS: raise ValueError('Illegal argument <{0}>!'.format(k)) if k in AESTHETICS: if v in seen_layers: raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v)) seen_layers.add(v) if k in VALUES: if not isinstance(v, six.string_types) and not isinstance(v, list): raise ValueError('Value <{0}> must be either string or list'.format(k)) if isinstance(v, list): if len(v) == 0: raise ValueError('Rules cannot be empty list') for rule_matcher, rule_value in v: if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types): raise ValueError('Rule tuple elements must be strings')
[ "def", "assert_legal_arguments", "(", "kwargs", ")", ":", "seen_layers", "=", "set", "(", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "not", "in", "LEGAL_ARGUMENTS", ":", "raise", "ValueError", "(", "'Illegal argume...
Assert that PrettyPrinter arguments are correct. Raises ------ ValueError In case there are unknown arguments or a single layer is mapped to more than one aesthetic.
[ "Assert", "that", "PrettyPrinter", "arguments", "are", "correct", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L17-L41
7,650
estnltk/estnltk
estnltk/prettyprinter/prettyprinter.py
parse_arguments
def parse_arguments(kwargs): """Function that parses PrettyPrinter arguments. Detects which aesthetics are mapped to which layers and collects user-provided values. Parameters ---------- kwargs: dict The keyword arguments to PrettyPrinter. Returns ------- dict, dict First dictionary is aesthetic to layer mapping. Second dictionary is aesthetic to user value mapping. """ aesthetics = {} values = {} for aes in AESTHETICS: if aes in kwargs: aesthetics[aes] = kwargs[aes] val_name = AES_VALUE_MAP[aes] # map the user-provided CSS value or use the default values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes]) return aesthetics, values
python
def parse_arguments(kwargs): aesthetics = {} values = {} for aes in AESTHETICS: if aes in kwargs: aesthetics[aes] = kwargs[aes] val_name = AES_VALUE_MAP[aes] # map the user-provided CSS value or use the default values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes]) return aesthetics, values
[ "def", "parse_arguments", "(", "kwargs", ")", ":", "aesthetics", "=", "{", "}", "values", "=", "{", "}", "for", "aes", "in", "AESTHETICS", ":", "if", "aes", "in", "kwargs", ":", "aesthetics", "[", "aes", "]", "=", "kwargs", "[", "aes", "]", "val_name...
Function that parses PrettyPrinter arguments. Detects which aesthetics are mapped to which layers and collects user-provided values. Parameters ---------- kwargs: dict The keyword arguments to PrettyPrinter. Returns ------- dict, dict First dictionary is aesthetic to layer mapping. Second dictionary is aesthetic to user value mapping.
[ "Function", "that", "parses", "PrettyPrinter", "arguments", ".", "Detects", "which", "aesthetics", "are", "mapped", "to", "which", "layers", "and", "collects", "user", "-", "provided", "values", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L44-L68
7,651
estnltk/estnltk
estnltk/prettyprinter/prettyprinter.py
PrettyPrinter.render
def render(self, text, add_header=False): """Render the HTML. Parameters ---------- add_header: boolean (default: False) If True, add HTML5 header and footer. Returns ------- str The rendered HTML. """ html = mark_text(text, self.aesthetics, self.rules) html = html.replace('\n', '<br/>') if add_header: html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER]) #print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER))) return html
python
def render(self, text, add_header=False): html = mark_text(text, self.aesthetics, self.rules) html = html.replace('\n', '<br/>') if add_header: html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER]) #print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER))) return html
[ "def", "render", "(", "self", ",", "text", ",", "add_header", "=", "False", ")", ":", "html", "=", "mark_text", "(", "text", ",", "self", ".", "aesthetics", ",", "self", ".", "rules", ")", "html", "=", "html", ".", "replace", "(", "'\\n'", ",", "'<...
Render the HTML. Parameters ---------- add_header: boolean (default: False) If True, add HTML5 header and footer. Returns ------- str The rendered HTML.
[ "Render", "the", "HTML", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L121-L140
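A hedged usage sketch for render. That PrettyPrinter's constructor accepts aesthetic keywords directly (here 'color') and that 'words' is an available layer name are assumptions, not confirmed by this file.
from estnltk import Text
from estnltk.prettyprinter.prettyprinter import PrettyPrinter

pp = PrettyPrinter(color='words')        # assumed: one aesthetic mapped to one layer
html = pp.render(Text('Tere maailm!'), add_header=True)
with open('out.html', 'w') as f:
    f.write(html)                        # standalone HTML5 page including the CSS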
7,652
estnltk/estnltk
estnltk/estner/crfsuiteutil.py
Trainer.train
def train(self, nerdocs, mode_filename): """Train a CRF model using given documents. Parameters ---------- nerdocs: list of estnltk.estner.ner.Document. The documents for model training. mode_filename: str The filename where to save the model. """ trainer = pycrfsuite.Trainer(algorithm=self.algorithm, params={'c2': self.c2}, verbose=self.verbose) for doc in nerdocs: for snt in doc.sentences: xseq = [t.feature_list() for t in snt] yseq = [t.label for t in snt] trainer.append(xseq, yseq) trainer.train(mode_filename)
python
def train(self, nerdocs, mode_filename): trainer = pycrfsuite.Trainer(algorithm=self.algorithm, params={'c2': self.c2}, verbose=self.verbose) for doc in nerdocs: for snt in doc.sentences: xseq = [t.feature_list() for t in snt] yseq = [t.label for t in snt] trainer.append(xseq, yseq) trainer.train(mode_filename)
[ "def", "train", "(", "self", ",", "nerdocs", ",", "mode_filename", ")", ":", "trainer", "=", "pycrfsuite", ".", "Trainer", "(", "algorithm", "=", "self", ".", "algorithm", ",", "params", "=", "{", "'c2'", ":", "self", ".", "c2", "}", ",", "verbose", ...
Train a CRF model using given documents. Parameters ---------- nerdocs: list of estnltk.estner.ner.Document. The documents for model training. mode_filename: str The filename where to save the model.
[ "Train", "a", "CRF", "model", "using", "given", "documents", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/estner/crfsuiteutil.py#L28-L49
7,653
estnltk/estnltk
estnltk/wiki/convert.py
json_2_text
def json_2_text(inp, out, verbose = False): """Convert a Wikipedia article to a Text object. Concatenates the sections in the wikipedia file and rearranges other information so it can be interpreted as a Text object. Links and other elements with start and end positions are annotated as layers. Parameters ---------- inp: directory of parsed et.wikipedia articles in json format out: output directory of .txt files verbose: if True, prints every article title and the total count of converted files; if False, prints every 50th count Returns ------- estnltk.text.Text The Text object. """ for root, dirs, filenames in os.walk(inp): for f in filenames: log = codecs.open(os.path.join(root, f), 'r') j_obj = json.load(log) j_obj = json_format(j_obj) # not needed, because json_format takes care of the right structuring #text = Text(j_obj) textWriter(j_obj, out, verbose)
python
def json_2_text(inp, out, verbose = False): for root, dirs, filenames in os.walk(inp): for f in filenames: log = codecs.open(os.path.join(root, f), 'r') j_obj = json.load(log) j_obj = json_format(j_obj) # not needed, because json_format takes care of the right structuring #text = Text(j_obj) textWriter(j_obj, out, verbose)
[ "def", "json_2_text", "(", "inp", ",", "out", ",", "verbose", "=", "False", ")", ":", "for", "root", ",", "dirs", ",", "filenames", "in", "os", ".", "walk", "(", "inp", ")", ":", "for", "f", "in", "filenames", ":", "log", "=", "codecs", ".", "ope...
Convert a Wikipedia article to a Text object. Concatenates the sections in the wikipedia file and rearranges other information so it can be interpreted as a Text object. Links and other elements with start and end positions are annotated as layers. Parameters ---------- inp: directory of parsed et.wikipedia articles in json format out: output directory of .txt files verbose: if True, prints every article title and the total count of converted files; if False, prints every 50th count Returns ------- estnltk.text.Text The Text object.
[ "Convert", "a", "Wikipedia", "article", "to", "Text", "object", ".", "Concatenates", "the", "sections", "in", "wikipedia", "file", "and", "rearranges", "other", "information", "so", "it", "can", "be", "interpreted", "as", "a", "Text", "object", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/convert.py#L95-L126
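A minimal invocation sketch for the converter above; the directory names are placeholders.
from estnltk.wiki.convert import json_2_text

# Walk a tree of parsed et.wikipedia JSON articles and emit .txt files.
json_2_text('parsed_articles/', 'plain_text/', verbose=True)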
7,654
estnltk/estnltk
estnltk/grammar/match.py
concatenate_matches
def concatenate_matches(a, b, text, name): """Concatenate matches a and b. All submatches will be copied to result.""" match = Match(a.start, b.end, text[a.start:b.end], name) for k, v in a.matches.items(): match.matches[k] = v for k, v in b.matches.items(): match.matches[k] = v if a.name is not None: aa = copy(a) del aa[MATCHES] match.matches[a.name] = aa if b.name is not None: bb = copy(b) del bb[MATCHES] match.matches[b.name] = bb return match
python
def concatenate_matches(a, b, text, name): match = Match(a.start, b.end, text[a.start:b.end], name) for k, v in a.matches.items(): match.matches[k] = v for k, v in b.matches.items(): match.matches[k] = v if a.name is not None: aa = copy(a) del aa[MATCHES] match.matches[a.name] = aa if b.name is not None: bb = copy(b) del bb[MATCHES] match.matches[b.name] = bb return match
[ "def", "concatenate_matches", "(", "a", ",", "b", ",", "text", ",", "name", ")", ":", "match", "=", "Match", "(", "a", ".", "start", ",", "b", ".", "end", ",", "text", "[", "a", ".", "start", ":", "b", ".", "end", "]", ",", "name", ")", "for"...
Concatenate matches a and b. All submatches will be copied to result.
[ "Concatenate", "matches", "a", "and", "b", ".", "All", "submatches", "will", "be", "copied", "to", "result", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/match.py#L81-L97
7,655
estnltk/estnltk
estnltk/grammar/match.py
Match.dict
def dict(self): """Dictionary representing this match and all child symbol matches.""" res = copy(self) if MATCHES in res: del res[MATCHES] if NAME in res: del res[NAME] res = {self.name: res} for k, v in self.matches.items(): res[k] = v if NAME in res[k]: del res[k][NAME] return res
python
def dict(self): res = copy(self) if MATCHES in res: del res[MATCHES] if NAME in res: del res[NAME] res = {self.name: res} for k, v in self.matches.items(): res[k] = v if NAME in res[k]: del res[k][NAME] return res
[ "def", "dict", "(", "self", ")", ":", "res", "=", "copy", "(", "self", ")", "if", "MATCHES", "in", "res", ":", "del", "res", "[", "MATCHES", "]", "if", "NAME", "in", "res", ":", "del", "res", "[", "NAME", "]", "res", "=", "{", "self", ".", "n...
Dictionary representing this match and all child symbol matches.
[ "Dictionary", "representing", "this", "match", "and", "all", "child", "symbol", "matches", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/grammar/match.py#L54-L66
7,656
estnltk/estnltk
estnltk/vabamorf/morf.py
regex_from_markers
def regex_from_markers(markers): """Given a string of characters, construct a regex that matches them. Parameters ---------- markers: str The list of string containing the markers Returns ------- regex The regular expression matching the given markers. """ return re.compile('|'.join([re.escape(c) for c in markers]))
python
def regex_from_markers(markers): return re.compile('|'.join([re.escape(c) for c in markers]))
[ "def", "regex_from_markers", "(", "markers", ")", ":", "return", "re", ".", "compile", "(", "'|'", ".", "join", "(", "[", "re", ".", "escape", "(", "c", ")", "for", "c", "in", "markers", "]", ")", ")" ]
Given a string of characters, construct a regex that matches them. Parameters ---------- markers: str The list of string containing the markers Returns ------- regex The regular expression matching the given markers.
[ "Given", "a", "string", "of", "characters", "construct", "a", "regex", "that", "matches", "them", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L45-L58
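A standalone sketch showing why re.escape matters here: regex metacharacters among the markers are matched literally instead of breaking the pattern.
import re

def regex_from_markers(markers):
    # Escaping each character keeps e.g. '?' or ']' from being treated as syntax.
    return re.compile('|'.join([re.escape(c) for c in markers]))

marker_re = regex_from_markers('<>?]')
print(marker_re.sub('', 'v?ana<em]a'))  # 'vanaema' - all markers stripped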
7,657
estnltk/estnltk
estnltk/vabamorf/morf.py
convert
def convert(word): """This method converts given `word` to UTF-8 encoding and `bytes` type for the SWIG wrapper.""" if six.PY2: if isinstance(word, unicode): return word.encode('utf-8') else: return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain else: # ==> Py3 if isinstance(word, bytes): return word.decode('utf-8') # bytes must be in utf8 return word
python
def convert(word): if six.PY2: if isinstance(word, unicode): return word.encode('utf-8') else: return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain else: # ==> Py3 if isinstance(word, bytes): return word.decode('utf-8') # bytes must be in utf8 return word
[ "def", "convert", "(", "word", ")", ":", "if", "six", ".", "PY2", ":", "if", "isinstance", "(", "word", ",", "unicode", ")", ":", "return", "word", ".", "encode", "(", "'utf-8'", ")", "else", ":", "return", "word", ".", "decode", "(", "'utf-8'", ")...
This method converts given `word` to UTF-8 encoding and `bytes` type for the SWIG wrapper.
[ "This", "method", "converts", "given", "word", "to", "UTF", "-", "8", "encoding", "and", "bytes", "type", "for", "the", "SWIG", "wrapper", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L64-L75
7,658
estnltk/estnltk
estnltk/vabamorf/morf.py
postprocess_result
def postprocess_result(morphresult, trim_phonetic, trim_compound): """Postprocess vabamorf wrapper output.""" word, analysis = morphresult return { 'text': deconvert(word), 'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis] }
python
def postprocess_result(morphresult, trim_phonetic, trim_compound): word, analysis = morphresult return { 'text': deconvert(word), 'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis] }
[ "def", "postprocess_result", "(", "morphresult", ",", "trim_phonetic", ",", "trim_compound", ")", ":", "word", ",", "analysis", "=", "morphresult", "return", "{", "'text'", ":", "deconvert", "(", "word", ")", ",", "'analysis'", ":", "[", "postprocess_analysis", ...
Postprocess vabamorf wrapper output.
[ "Postprocess", "vabamorf", "wrapper", "output", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L302-L308
7,659
estnltk/estnltk
estnltk/vabamorf/morf.py
trim_phonetics
def trim_phonetics(root): """Function that trims phonetic markup from the root. Parameters ---------- root: str The string from which to remove the phonetic markup. Returns ------- str The string with phonetic markup removed. """ global phonetic_markers global phonetic_regex if root in phonetic_markers: return root else: return phonetic_regex.sub('', root)
python
def trim_phonetics(root): global phonetic_markers global phonetic_regex if root in phonetic_markers: return root else: return phonetic_regex.sub('', root)
[ "def", "trim_phonetics", "(", "root", ")", ":", "global", "phonetic_markers", "global", "phonetic_regex", "if", "root", "in", "phonetic_markers", ":", "return", "root", "else", ":", "return", "phonetic_regex", ".", "sub", "(", "''", ",", "root", ")" ]
Function that trims phonetic markup from the root. Parameters ---------- root: str The string from which to remove the phonetic markup. Returns ------- str The string with phonetic markup removed.
[ "Function", "that", "trims", "phonetic", "markup", "from", "the", "root", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L330-L348
7,660
estnltk/estnltk
estnltk/vabamorf/morf.py
get_root
def get_root(root, phonetic, compound): """Get the root form without markers. Parameters ---------- root: str The word root form. phonetic: boolean If True, add phonetic information to the root forms. compound: boolean if True, add compound word markers to root forms. """ global compound_regex if not phonetic: root = trim_phonetics(root) if not compound: root = trim_compounds(root) return root
python
def get_root(root, phonetic, compound): global compound_regex if not phonetic: root = trim_phonetics(root) if not compound: root = trim_compounds(root) return root
[ "def", "get_root", "(", "root", ",", "phonetic", ",", "compound", ")", ":", "global", "compound_regex", "if", "not", "phonetic", ":", "root", "=", "trim_phonetics", "(", "root", ")", "if", "not", "compound", ":", "root", "=", "trim_compounds", "(", "root",...
Get the root form without markers. Parameters ---------- root: str The word root form. phonetic: boolean If True, add phonetic information to the root forms. compound: boolean if True, add compound word markers to root forms.
[ "Get", "the", "root", "form", "without", "markers", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L370-L387
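A usage sketch for get_root; that '?' is a phonetic marker and '_' a compound separator in Vabamorf roots is an assumption about the marker sets defined earlier in this file.
from estnltk.vabamorf.morf import get_root

raw = 'k?ohvi_masin'  # assumed raw root: '?' phonetic stress, '_' compound split

print(get_root(raw, phonetic=False, compound=False))  # -> 'kohvimasin' (both stripped)
print(get_root(raw, phonetic=False, compound=True))   # -> 'kohvi_masin'
print(get_root(raw, phonetic=True, compound=True))    # -> 'k?ohvi_masin' (unchanged)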
7,661
estnltk/estnltk
estnltk/vabamorf/morf.py
Vabamorf.instance
def instance(): """Return an PyVabamorf instance. It returns the previously initialized instance or creates a new one if nothing exists. Also creates new instance in case the process has been forked. """ if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid(): Vabamorf.pid = os.getpid() Vabamorf.morf = Vabamorf() return Vabamorf.morf
python
def instance(): if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid(): Vabamorf.pid = os.getpid() Vabamorf.morf = Vabamorf() return Vabamorf.morf
[ "def", "instance", "(", ")", ":", "if", "not", "hasattr", "(", "Vabamorf", ",", "'pid'", ")", "or", "Vabamorf", ".", "pid", "!=", "os", ".", "getpid", "(", ")", ":", "Vabamorf", ".", "pid", "=", "os", ".", "getpid", "(", ")", "Vabamorf", ".", "mo...
Return a PyVabamorf instance. It returns the previously initialized instance or creates a new one if nothing exists. Also creates a new instance in case the process has been forked.
[ "Return", "an", "PyVabamorf", "instance", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L101-L111
7,662
estnltk/estnltk
estnltk/vabamorf/morf.py
Vabamorf.analyze
def analyze(self, words, **kwargs): """Perform morphological analysis and disambiguation of given text. Parameters ---------- words: list of str or str Either a list of pretokenized words or a string. In case of a string, it will be split using the default behaviour of the string.split() function. disambiguate: boolean (default: True) Disambiguate the output and remove inconsistent analyses. guess: boolean (default: True) Use guessing in case of unknown words. propername: boolean (default: True) Perform additional analysis of proper names. compound: boolean (default: True) Add compound word markers to root forms. phonetic: boolean (default: False) Add phonetic information to root forms. Returns ------- list of (list of dict) List of analyses for each word in the input. """ # if input is a string, then tokenize it if isinstance(words, six.string_types): words = words.split() # convert words to native strings words = [convert(w) for w in words] morfresults = self._morf.analyze( vm.StringVector(words), kwargs.get('disambiguate', True), kwargs.get('guess', True), True, # phonetic and compound information kwargs.get('propername', True)) trim_phonetic = kwargs.get('phonetic', False) trim_compound = kwargs.get('compound', True) return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults]
python
def analyze(self, words, **kwargs): # if input is a string, then tokenize it if isinstance(words, six.string_types): words = words.split() # convert words to native strings words = [convert(w) for w in words] morfresults = self._morf.analyze( vm.StringVector(words), kwargs.get('disambiguate', True), kwargs.get('guess', True), True, # phonetic and compound information kwargs.get('propername', True)) trim_phonetic = kwargs.get('phonetic', False) trim_compound = kwargs.get('compound', True) return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults]
[ "def", "analyze", "(", "self", ",", "words", ",", "*", "*", "kwargs", ")", ":", "# if input is a string, then tokenize it", "if", "isinstance", "(", "words", ",", "six", ".", "string_types", ")", ":", "words", "=", "words", ".", "split", "(", ")", "# conve...
Perform morphological analysis and disambiguation of given text. Parameters ---------- words: list of str or str Either a list of pretokenized words or a string. In case of a string, it will be split using the default behaviour of the string.split() function. disambiguate: boolean (default: True) Disambiguate the output and remove inconsistent analyses. guess: boolean (default: True) Use guessing in case of unknown words. propername: boolean (default: True) Perform additional analysis of proper names. compound: boolean (default: True) Add compound word markers to root forms. phonetic: boolean (default: False) Add phonetic information to root forms. Returns ------- list of (list of dict) List of analyses for each word in the input.
[ "Perform", "morphological", "analysis", "and", "disambiguation", "of", "given", "text", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L129-L169
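A usage sketch combining instance() and analyze() from above; that each analysis dict carries a 'root' key is an assumption based on postprocess_analysis.
from estnltk.vabamorf.morf import Vabamorf

words = Vabamorf.instance().analyze('Tere maailm', phonetic=False)
for word in words:
    # 'root' is assumed to be among the keys produced by postprocess_analysis.
    print(word['text'], [a['root'] for a in word['analysis']])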
7,663
estnltk/estnltk
estnltk/vabamorf/morf.py
Vabamorf.disambiguate
def disambiguate(self, words): """Disambiguate previously analyzed words. Parameters ---------- words: list of dict A sentence of words. Returns ------- list of dict Sentence of disambiguated words. """ words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words]) disambiguated = self._morf.disambiguate(words) return [postprocess_result(mr, False, True) for mr in disambiguated]
python
def disambiguate(self, words): words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words]) disambiguated = self._morf.disambiguate(words) return [postprocess_result(mr, False, True) for mr in disambiguated]
[ "def", "disambiguate", "(", "self", ",", "words", ")", ":", "words", "=", "vm", ".", "SentenceAnalysis", "(", "[", "as_wordanalysis", "(", "w", ")", "for", "w", "in", "words", "]", ")", "disambiguated", "=", "self", ".", "_morf", ".", "disambiguate", "...
Disambiguate previously analyzed words. Parameters ---------- words: list of dict A sentence of words. Returns ------- list of dict Sentence of disambiguated words.
[ "Disambiguate", "previously", "analyzed", "words", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L171-L186
7,664
estnltk/estnltk
estnltk/vabamorf/morf.py
Vabamorf.spellcheck
def spellcheck(self, words, suggestions=True): """Spellcheck given sentence. Note that the spellchecker does not respect pre-tokenized words and concatenates token sequences such as "New York". Parameters ---------- words: list of str or str Either a list of pretokenized words or a string. In case of a string, it will be split using the default behaviour of the string.split() function. suggestions: boolean (default: True) Add spell suggestions to the result. Returns ------- list of dict Each dictionary contains the following values: 'word': the original word 'spelling': True, if the word was spelled correctly 'suggestions': list of suggested strings in case of incorrect spelling """ if isinstance(words, six.string_types): words = words.split() # convert words to native strings words = [convert(w) for w in words] spellresults = self._morf.spellcheck(words, suggestions) results = [] for spellresult in spellresults: suggestions = [deconvert(s) for s in spellresult.suggestions] result = { 'text': deconvert(spellresult.word), 'spelling': spellresult.spelling, 'suggestions': suggestions } results.append(result) return results
python
def spellcheck(self, words, suggestions=True): if isinstance(words, six.string_types): words = words.split() # convert words to native strings words = [convert(w) for w in words] spellresults = self._morf.spellcheck(words, suggestions) results = [] for spellresult in spellresults: suggestions = [deconvert(s) for s in spellresult.suggestions] result = { 'text': deconvert(spellresult.word), 'spelling': spellresult.spelling, 'suggestions': suggestions } results.append(result) return results
[ "def", "spellcheck", "(", "self", ",", "words", ",", "suggestions", "=", "True", ")", ":", "if", "isinstance", "(", "words", ",", "six", ".", "string_types", ")", ":", "words", "=", "words", ".", "split", "(", ")", "# convert words to native strings", "wor...
Spellcheck given sentence. Note that the spellchecker does not respect pre-tokenized words and concatenates token sequences such as "New York". Parameters ---------- words: list of str or str Either a list of pretokenized words or a string. In case of a string, it will be split using the default behaviour of the string.split() function. suggestions: boolean (default: True) Add spell suggestions to the result. Returns ------- list of dict Each dictionary contains the following values: 'word': the original word 'spelling': True, if the word was spelled correctly 'suggestions': list of suggested strings in case of incorrect spelling
[ "Spellcheck", "given", "sentence", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L188-L226
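A usage sketch for spellcheck, reporting suggestions for misspelled tokens only.
from estnltk.vabamorf.morf import Vabamorf

results = Vabamorf.instance().spellcheck('Tere maaailm', suggestions=True)
for r in results:
    if not r['spelling']:
        print(r['text'], '->', r['suggestions'])  # e.g. maaailm -> ['maailm']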
7,665
estnltk/estnltk
estnltk/clausesegmenter.py
ClauseSegmenter.annotate_indices
def annotate_indices(self, sentence): """Add clause indexes to already annotated sentence.""" max_index = 0 max_depth = 1 stack_of_indexes = [ max_index ] for token in sentence: if CLAUSE_ANNOT not in token: token[CLAUSE_IDX] = stack_of_indexes[-1] else: # Annotations that open a clause for annotation in token[CLAUSE_ANNOT]: if annotation == "KIILU_ALGUS": # Go one level deeper, start the next embedded clause max_index += 1 stack_of_indexes.append(max_index) if (len(stack_of_indexes) > max_depth): max_depth = len(stack_of_indexes) token[CLAUSE_IDX] = stack_of_indexes[-1] # Annotations that close a clause for annotation in token[CLAUSE_ANNOT]: if annotation == "KINDEL_PIIR": # Move on at the same level, start the next clause max_index += 1 stack_of_indexes[-1] = max_index elif annotation == "KIILU_LOPP": # Come back up a level, close one embedded clause stack_of_indexes.pop() return sentence
python
def annotate_indices(self, sentence): max_index = 0 max_depth = 1 stack_of_indexes = [ max_index ] for token in sentence: if CLAUSE_ANNOT not in token: token[CLAUSE_IDX] = stack_of_indexes[-1] else: # Annotations that open a clause for annotation in token[CLAUSE_ANNOT]: if annotation == "KIILU_ALGUS": # Go one level deeper, start the next embedded clause max_index += 1 stack_of_indexes.append(max_index) if (len(stack_of_indexes) > max_depth): max_depth = len(stack_of_indexes) token[CLAUSE_IDX] = stack_of_indexes[-1] # Annotations that close a clause for annotation in token[CLAUSE_ANNOT]: if annotation == "KINDEL_PIIR": # Move on at the same level, start the next clause max_index += 1 stack_of_indexes[-1] = max_index elif annotation == "KIILU_LOPP": # Come back up a level, close one embedded clause stack_of_indexes.pop() return sentence
[ "def", "annotate_indices", "(", "self", ",", "sentence", ")", ":", "max_index", "=", "0", "max_depth", "=", "1", "stack_of_indexes", "=", "[", "max_index", "]", "for", "token", "in", "sentence", ":", "if", "CLAUSE_ANNOT", "not", "in", "token", ":", "token"...
Add clause indexes to already annotated sentence.
[ "Add", "clause", "indexes", "to", "already", "annotated", "sentence", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/clausesegmenter.py#L64-L91
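A self-contained sketch of the stack-based clause numbering above, using plain marker lists instead of estnltk tokens; it reproduces the index each token would receive.
def clause_indices(annotations):
    # annotations: one entry per token, either None or a list of markers.
    max_index, stack = 0, [0]
    indices = []
    for annot in annotations:
        for a in (annot or []):
            if a == 'KIILU_ALGUS':     # embedded clause opens: push a new index
                max_index += 1
                stack.append(max_index)
        indices.append(stack[-1])      # the token belongs to the topmost clause
        for a in (annot or []):
            if a == 'KINDEL_PIIR':     # hard boundary: new clause at the same depth
                max_index += 1
                stack[-1] = max_index
            elif a == 'KIILU_LOPP':    # embedded clause closes: pop
                stack.pop()
    return indices

print(clause_indices([None, ['KIILU_ALGUS'], None, ['KIILU_LOPP'], None]))
# -> [0, 1, 1, 1, 0]: the middle three tokens form the embedded clause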
7,666
estnltk/estnltk
estnltk/clausesegmenter.py
ClauseSegmenter.rename_annotations
def rename_annotations(self, sentence): """Function that renames and restructures clause information.""" annotations = [] for token in sentence: data = {CLAUSE_IDX: token[CLAUSE_IDX]} if CLAUSE_ANNOT in token: if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END annotations.append(data) return annotations
python
def rename_annotations(self, sentence): annotations = [] for token in sentence: data = {CLAUSE_IDX: token[CLAUSE_IDX]} if CLAUSE_ANNOT in token: if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]: data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END annotations.append(data) return annotations
[ "def", "rename_annotations", "(", "self", ",", "sentence", ")", ":", "annotations", "=", "[", "]", "for", "token", "in", "sentence", ":", "data", "=", "{", "CLAUSE_IDX", ":", "token", "[", "CLAUSE_IDX", "]", "}", "if", "CLAUSE_ANNOT", "in", "token", ":",...
Function that renames and restructures clause information.
[ "Function", "that", "renames", "and", "restructures", "clause", "information", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/clausesegmenter.py#L93-L106
7,667
estnltk/estnltk
estnltk/tools/train_default_ner_model.py
train_default_model
def train_default_model(): """Function for training the default NER model. NB! It overwrites the default model, so do not use it unless you know what are you doing. The training data is in file estnltk/corpora/estner.json.bz2 . The resulting model will be saved to estnltk/estner/models/default.bin """ docs = read_json_corpus(DEFAULT_NER_DATASET) trainer = NerTrainer(default_nersettings) trainer.train(docs, DEFAULT_NER_MODEL_DIR)
python
def train_default_model(): docs = read_json_corpus(DEFAULT_NER_DATASET) trainer = NerTrainer(default_nersettings) trainer.train(docs, DEFAULT_NER_MODEL_DIR)
[ "def", "train_default_model", "(", ")", ":", "docs", "=", "read_json_corpus", "(", "DEFAULT_NER_DATASET", ")", "trainer", "=", "NerTrainer", "(", "default_nersettings", ")", "trainer", ".", "train", "(", "docs", ",", "DEFAULT_NER_MODEL_DIR", ")" ]
Function for training the default NER model. NB! It overwrites the default model, so do not use it unless you know what you are doing. The training data is in the file estnltk/corpora/estner.json.bz2. The resulting model will be saved to estnltk/estner/models/default.bin
[ "Function", "for", "training", "the", "default", "NER", "model", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/tools/train_default_ner_model.py#L10-L21
7,668
estnltk/estnltk
estnltk/wordnet/wn.py
_get_synset_offsets
def _get_synset_offsets(synset_idxes): """Returns pointer offset in the WordNet file for every synset index. Notes ----- Internal function. Do not call directly. Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)]. Parameters ---------- synset_idxes : list of ints Lists synset IDs which need an offset. Returns ------- list of ints Lists pointer offsets in the WordNet file. """ offsets = {} current_seeked_offset_idx = 0 ordered_synset_idxes = sorted(synset_idxes) with codecs.open(_SOI,'rb', 'utf-8') as fin: for line in fin: split_line = line.split(':') while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]): # Looping on single line entries in case synset_idxes contains duplicates. offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1]) current_seeked_offset_idx += 1 if current_seeked_offset_idx >= len(synset_idxes): break return [offsets[synset_idx] for synset_idx in synset_idxes]
python
def _get_synset_offsets(synset_idxes): offsets = {} current_seeked_offset_idx = 0 ordered_synset_idxes = sorted(synset_idxes) with codecs.open(_SOI,'rb', 'utf-8') as fin: for line in fin: split_line = line.split(':') while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]): # Looping on single line entries in case synset_idxes contains duplicates. offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1]) current_seeked_offset_idx += 1 if current_seeked_offset_idx >= len(synset_idxes): break return [offsets[synset_idx] for synset_idx in synset_idxes]
[ "def", "_get_synset_offsets", "(", "synset_idxes", ")", ":", "offsets", "=", "{", "}", "current_seeked_offset_idx", "=", "0", "ordered_synset_idxes", "=", "sorted", "(", "synset_idxes", ")", "with", "codecs", ".", "open", "(", "_SOI", ",", "'rb'", ",", "'utf-8...
Returns pointer offset in the WordNet file for every synset index. Notes ----- Internal function. Do not call directly. Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)]. Parameters ---------- synset_idxes : list of ints Lists synset IDs which need an offset. Returns ------- list of ints Lists pointer offsets in the WordNet file.
[ "Returs", "pointer", "offset", "in", "the", "WordNet", "file", "for", "every", "synset", "index", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L53-L88
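The function assumes a line-oriented index file in which each line maps a synset ID to a byte offset, "id:offset", sorted by ID; the single-pass scan above then resolves all requested IDs at once. A toy re-enactment (file contents and names are illustrative, not the real _SOI data):

index_lines = ["1:0", "2:153", "5:407"]   # stand-in for the _SOI index file
wanted = [5, 1, 5]                        # arbitrary order, duplicates allowed
offsets = {}
for line in index_lines:
    sid, off = line.split(':')
    if int(sid) in set(wanted):
        offsets[int(sid)] = int(off)
print([offsets[i] for i in wanted])       # [407, 0, 407] -- input order preserved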
7,669
estnltk/estnltk
estnltk/wordnet/wn.py
_get_synsets
def _get_synsets(synset_offsets): """Given synset offsets in the WordNet file, parses synset object for every offset. Notes ----- Internal function. Do not call directly. Stores every parsed synset into global synset dictionary under two keys: synset's name lemma.pos.sense_no and synset's id (unique integer). Parameters ---------- synset_offsets : list of ints Lists pointer offsets from which synset objects will be parsed. Returns ------- list of Synsets Lists synset objects which synset_offsets point to. """ global parser if parser is None: parser = Parser(_WN_FILE) synsets = [] for offset in synset_offsets: raw_synset = parser.parse_synset(offset) synset = Synset(raw_synset) SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset SYNSETS_DICT[synset.id] = synset synsets.append(synset) return synsets
python
def _get_synsets(synset_offsets): global parser if parser is None: parser = Parser(_WN_FILE) synsets = [] for offset in synset_offsets: raw_synset = parser.parse_synset(offset) synset = Synset(raw_synset) SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset SYNSETS_DICT[synset.id] = synset synsets.append(synset) return synsets
[ "def", "_get_synsets", "(", "synset_offsets", ")", ":", "global", "parser", "if", "parser", "is", "None", ":", "parser", "=", "Parser", "(", "_WN_FILE", ")", "synsets", "=", "[", "]", "for", "offset", "in", "synset_offsets", ":", "raw_synset", "=", "parser...
Given synset offsets in the WordNet file, parses synset object for every offset. Notes ----- Internal function. Do not call directly. Stores every parsed synset into global synset dictionary under two keys: synset's name lemma.pos.sense_no and synset's id (unique integer). Parameters ---------- synset_offsets : list of ints Lists pointer offsets from which synset objects will be parsed. Returns ------- list of Synsets Lists synset objects which synset_offsets point to.
[ "Given", "synset", "offsets", "in", "the", "WordNet", "file", "parses", "synset", "object", "for", "every", "offset", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L90-L124
7,670
estnltk/estnltk
estnltk/wordnet/wn.py
_get_key_from_raw_synset
def _get_key_from_raw_synset(raw_synset): """Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class. Notes ----- Internal function. Do not call directly. Parameters ---------- raw_synset : eurown.Synset Synset representation from which lemma, part-of-speech and sense are derived. Returns ------- string Key of the synset in the form of `lemma.pos.sense_no`. """ pos = raw_synset.pos literal = raw_synset.variants[0].literal sense = "%02d"%raw_synset.variants[0].sense return '.'.join([literal,pos,sense])
python
def _get_key_from_raw_synset(raw_synset): pos = raw_synset.pos literal = raw_synset.variants[0].literal sense = "%02d"%raw_synset.variants[0].sense return '.'.join([literal,pos,sense])
[ "def", "_get_key_from_raw_synset", "(", "raw_synset", ")", ":", "pos", "=", "raw_synset", ".", "pos", "literal", "=", "raw_synset", ".", "variants", "[", "0", "]", ".", "literal", "sense", "=", "\"%02d\"", "%", "raw_synset", ".", "variants", "[", "0", "]",...
Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class. Notes ----- Internal function. Do not call directly. Parameters ---------- raw_synset : eurown.Synset Synset representation from which lemma, part-of-speech and sense are derived. Returns ------- string Key of the synset in the form of `lemma.pos.sense_no`.
[ "Derives", "synset", "key", "in", "the", "form", "of", "lemma", ".", "pos", ".", "sense_no", "from", "the", "provided", "eurown", ".", "py", "Synset", "class" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L126-L148
7,671
estnltk/estnltk
estnltk/wordnet/wn.py
synset
def synset(synset_key): """Returns synset object with the provided key. Notes ----- Uses lazy initialization - synsets will be fetched from a dictionary after the first request. Parameters ---------- synset_key : string Unique synset identifier in the form of `lemma.pos.sense_no`. Returns ------- Synset Synset with key `synset_key`. None, if no match was found. """ if synset_key in SYNSETS_DICT: return SYNSETS_DICT[synset_key] def _get_synset_idx(synset_key): """Returns synset index for the provided key. Note ---- Internal function. Do not call directly. """ with codecs.open(_SENSE_FILE,'rb', 'utf-8') as fin: for line in fin: split_line = line.split(':') if split_line[0] == synset_key: return int(split_line[1].strip()) return None synset_idx = _get_synset_idx(synset_key) if synset_idx == None: return None synset_offset = _get_synset_offsets([synset_idx]) synset = _get_synsets(synset_offset) return synset[0]
python
def synset(synset_key): if synset_key in SYNSETS_DICT: return SYNSETS_DICT[synset_key] def _get_synset_idx(synset_key): """Returns synset index for the provided key. Note ---- Internal function. Do not call directly. """ with codecs.open(_SENSE_FILE,'rb', 'utf-8') as fin: for line in fin: split_line = line.split(':') if split_line[0] == synset_key: return int(split_line[1].strip()) return None synset_idx = _get_synset_idx(synset_key) if synset_idx is None: return None synset_offset = _get_synset_offsets([synset_idx]) synset = _get_synsets(synset_offset) return synset[0]
[ "def", "synset", "(", "synset_key", ")", ":", "if", "synset_key", "in", "SYNSETS_DICT", ":", "return", "SYNSETS_DICT", "[", "synset_key", "]", "def", "_get_synset_idx", "(", "synset_key", ")", ":", "\"\"\"Returns synset index for the provided key.\n\n Note\n ...
Returns synset object with the provided key. Notes ----- Uses lazy initialization - synsets will be fetched from a dictionary after the first request. Parameters ---------- synset_key : string Unique synset identifier in the form of `lemma.pos.sense_no`. Returns ------- Synset Synset with key `synset_key`. None, if no match was found.
[ "Returns", "synset", "object", "with", "the", "provided", "key", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L150-L196
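Typical use, with a sense key in the `lemma.pos.sense_no` format the module expects (the concrete key below is only illustrative):

from estnltk.wordnet import wn
ss = wn.synset('koer.n.01')   # 'koer' = dog, noun, first sense; illustrative key
if ss is not None:
    print(ss)                 # later lookups of the same key hit SYNSETS_DICT, not the file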
7,672
estnltk/estnltk
estnltk/wordnet/wn.py
synsets
def synsets(lemma,pos=None): """Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided. Notes ----- Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary. Parameters ---------- lemma : str Lemma of the synset. pos : str, optional Part-of-speech specification of the searched synsets, defaults to None. Returns ------- list of Synsets Synsets which contain `lemma` and of which part-of-speech is `pos`, if specified. Empty list, if no match was found. """ def _get_synset_idxes(lemma,pos): line_prefix_regexp = "%s:%s:(.*)"%(lemma,pos if pos else "\w+") line_prefix = re.compile(line_prefix_regexp) idxes = [] with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin: for line in fin: result = line_prefix.match(line) if result: res_indices = [int(x) for x in result.group(1).split(' ')] idxes.extend(res_indices) LEM_POS_2_SS_IDX[lemma][pos].extend(idxes) return sorted(idxes) synset_idxes = None if lemma in LEM_POS_2_SS_IDX: if pos in LEM_POS_2_SS_IDX[lemma]: synset_idxes = LEM_POS_2_SS_IDX[lemma][pos] else: synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]] if not synset_idxes: synset_idxes = _get_synset_idxes(lemma,pos) if len(synset_idxes) == 0: return [] stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT] unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT] synset_offsets = _get_synset_offsets(unstored_synset_idxes) synsets = _get_synsets(synset_offsets) return stored_synsets + synsets
python
def synsets(lemma,pos=None): def _get_synset_idxes(lemma,pos): line_prefix_regexp = "%s:%s:(.*)"%(lemma,pos if pos else r"\w+") line_prefix = re.compile(line_prefix_regexp) idxes = [] with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin: for line in fin: result = line_prefix.match(line) if result: res_indices = [int(x) for x in result.group(1).split(' ')] idxes.extend(res_indices) LEM_POS_2_SS_IDX[lemma][pos].extend(idxes) return sorted(idxes) synset_idxes = None if lemma in LEM_POS_2_SS_IDX: if pos in LEM_POS_2_SS_IDX[lemma]: synset_idxes = LEM_POS_2_SS_IDX[lemma][pos] else: synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]] if not synset_idxes: synset_idxes = _get_synset_idxes(lemma,pos) if len(synset_idxes) == 0: return [] stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT] unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT] synset_offsets = _get_synset_offsets(unstored_synset_idxes) synsets = _get_synsets(synset_offsets) return stored_synsets + synsets
[ "def", "synsets", "(", "lemma", ",", "pos", "=", "None", ")", ":", "def", "_get_synset_idxes", "(", "lemma", ",", "pos", ")", ":", "line_prefix_regexp", "=", "\"%s:%s:(.*)\"", "%", "(", "lemma", ",", "pos", "if", "pos", "else", "\"\\w+\"", ")", "line_pre...
Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided. Notes ----- Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary. Parameters ---------- lemma : str Lemma of the synset. pos : str, optional Part-of-speech specification of the searched synsets, defaults to None. Returns ------- list of Synsets Synsets which contain `lemma` and whose part-of-speech is `pos`, if specified. Empty list, if no match was found.
[ "Returns", "all", "synset", "objects", "which", "have", "lemma", "as", "one", "of", "the", "variant", "literals", "and", "fixed", "pos", "if", "provided", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L199-L256
7,673
estnltk/estnltk
estnltk/wordnet/wn.py
all_synsets
def all_synsets(pos=None): """Return all the synsets which have the provided pos. Notes ----- Returns thousands or tens of thousands of synsets - first time will take significant time. Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time. Parameters ---------- pos : str Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`. If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time. Returns ------- list of Synsets Lists the Synsets which have `pos` as part-of-speech. Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`]. """ def _get_unique_synset_idxes(pos): idxes = [] with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin: if pos == None: for line in fin: split_line = line.strip().split(':') idxes.extend([int(x) for x in split_line[2].split()]) else: for line in fin: split_line = line.strip().split(':') if split_line[1] == pos: idxes.extend([int(x) for x in split_line[2].split()]) idxes = list(set(idxes)) idxes.sort() return idxes if pos in LOADED_POS: return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]] else: synset_idxes = _get_unique_synset_idxes(pos) if len(synset_idxes) == 0: return [] stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT] unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT] synset_offsets = _get_synset_offsets(unstored_synset_idxes) synsets = _get_synsets(synset_offsets) for synset in synsets: for variant in synset.get_variants(): LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id) LOADED_POS.add(pos) return stored_synsets + synsets
python
def all_synsets(pos=None): def _get_unique_synset_idxes(pos): idxes = [] with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin: if pos is None: for line in fin: split_line = line.strip().split(':') idxes.extend([int(x) for x in split_line[2].split()]) else: for line in fin: split_line = line.strip().split(':') if split_line[1] == pos: idxes.extend([int(x) for x in split_line[2].split()]) idxes = list(set(idxes)) idxes.sort() return idxes if pos in LOADED_POS: return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]] else: synset_idxes = _get_unique_synset_idxes(pos) if len(synset_idxes) == 0: return [] stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT] unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT] synset_offsets = _get_synset_offsets(unstored_synset_idxes) synsets = _get_synsets(synset_offsets) for synset in synsets: for variant in synset.get_variants(): LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id) LOADED_POS.add(pos) return stored_synsets + synsets
[ "def", "all_synsets", "(", "pos", "=", "None", ")", ":", "def", "_get_unique_synset_idxes", "(", "pos", ")", ":", "idxes", "=", "[", "]", "with", "codecs", ".", "open", "(", "_LIT_POS_FILE", ",", "'rb'", ",", "'utf-8'", ")", "as", "fin", ":", "if", "...
Return all the synsets which have the provided pos. Notes ----- Returns thousands or tens of thousands of synsets - the first call will take significant time. Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time. Parameters ---------- pos : str Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`. If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time. Returns ------- list of Synsets Lists the Synsets which have `pos` as part-of-speech. Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`].
[ "Return", "all", "the", "synsets", "which", "have", "the", "provided", "pos", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L258-L316
7,674
estnltk/estnltk
estnltk/wordnet/wn.py
lemma
def lemma(lemma_key): """Returns the Lemma object with the given key. Parameters ---------- lemma_key : str Key of the returned lemma. Returns ------- Lemma Lemma matching the `lemma_key`. """ if lemma_key in LEMMAS_DICT: return LEMMAS_DICT[lemma_key] split_lemma_key = lemma_key.split('.') synset_key = '.'.join(split_lemma_key[:3]) lemma_literal = split_lemma_key[3] lemma_obj = Lemma(synset_key,lemma_literal) LEMMAS_DICT[lemma_key] = lemma_obj return lemma_obj
python
def lemma(lemma_key): if lemma_key in LEMMAS_DICT: return LEMMAS_DICT[lemma_key] split_lemma_key = lemma_key.split('.') synset_key = '.'.join(split_lemma_key[:3]) lemma_literal = split_lemma_key[3] lemma_obj = Lemma(synset_key,lemma_literal) LEMMAS_DICT[lemma_key] = lemma_obj return lemma_obj
[ "def", "lemma", "(", "lemma_key", ")", ":", "if", "lemma_key", "in", "LEMMAS_DICT", ":", "return", "LEMMAS_DICT", "[", "lemma_key", "]", "split_lemma_key", "=", "lemma_key", ".", "split", "(", "'.'", ")", "synset_key", "=", "'.'", ".", "join", "(", "split_...
Returns the Lemma object with the given key. Parameters ---------- lemma_key : str Key of the returned lemma. Returns ------- Lemma Lemma matching the `lemma_key`.
[ "Returns", "the", "Lemma", "object", "with", "the", "given", "key", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L318-L341
7,675
estnltk/estnltk
estnltk/wordnet/wn.py
lemmas
def lemmas(lemma,pos=None): """Returns all the Lemma objects of which name is `lemma` and which have `pos` as part of speech. Parameters ---------- lemma : str Literal of the sought Lemma objects. pos : str, optional Part of speech of the sought Lemma objects. If None, matches any part of speech. Defaults to None Returns ------- list of Lemmas Lists all the matched Lemmas. """ lemma = lemma.lower() return [lemma_obj for synset in synsets(lemma,pos) for lemma_obj in synset.lemmas() if lemma_obj.name.lower() == lemma]
python
def lemmas(lemma,pos=None): lemma = lemma.lower() return [lemma_obj for synset in synsets(lemma,pos) for lemma_obj in synset.lemmas() if lemma_obj.name.lower() == lemma]
[ "def", "lemmas", "(", "lemma", ",", "pos", "=", "None", ")", ":", "lemma", "=", "lemma", ".", "lower", "(", ")", "return", "[", "lemma_obj", "for", "synset", "in", "synsets", "(", "lemma", ",", "pos", ")", "for", "lemma_obj", "in", "synset", ".", "...
Returns all the Lemma objects whose name is `lemma` and which have `pos` as part of speech. Parameters ---------- lemma : str Literal of the sought Lemma objects. pos : str, optional Part of speech of the sought Lemma objects. If None, matches any part of speech. Defaults to None. Returns ------- list of Lemmas Lists all the matched Lemmas.
[ "Returns", "all", "the", "Lemma", "objects", "of", "which", "name", "is", "lemma", "and", "which", "have", "pos", "as", "part", "of", "speech", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L348-L369
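A small usage sketch; matching is case-insensitive on the literal because both sides are lowercased (the lemma below is illustrative):

from estnltk.wordnet import wn
for lemma_obj in wn.lemmas('koer', pos=wn.NOUN):   # all noun senses of 'koer'
    print(lemma_obj.name)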
7,676
estnltk/estnltk
estnltk/wordnet/wn.py
Synset._recursive_hypernyms
def _recursive_hypernyms(self, hypernyms): """Finds all the hypernyms of the synset transitively. Notes ----- Internal method. Do not call directly. Parameters ---------- hypernyms : set of Synsets A set of hypernyms met so far. Returns ------- set of Synsets Returns the input set. """ hypernyms |= set(self.hypernyms()) for synset in self.hypernyms(): hypernyms |= synset._recursive_hypernyms(hypernyms) return hypernyms
python
def _recursive_hypernyms(self, hypernyms): hypernyms |= set(self.hypernyms()) for synset in self.hypernyms(): hypernyms |= synset._recursive_hypernyms(hypernyms) return hypernyms
[ "def", "_recursive_hypernyms", "(", "self", ",", "hypernyms", ")", ":", "hypernyms", "|=", "set", "(", "self", ".", "hypernyms", "(", ")", ")", "for", "synset", "in", "self", ".", "hypernyms", "(", ")", ":", "hypernyms", "|=", "synset", ".", "_recursive_...
Finds all the hypernyms of the synset transitively. Notes ----- Internal method. Do not call directly. Parameters ---------- hypernyms : set of Synsets A set of hypernyms met so far. Returns ------- set of Synsets Returns the input set.
[ "Finds", "all", "the", "hypernyms", "of", "the", "synset", "transitively", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L427-L449
7,677
estnltk/estnltk
estnltk/wordnet/wn.py
Synset._min_depth
def _min_depth(self): """Finds minimum path length from the root. Notes ----- Internal method. Do not call directly. Returns ------- int Minimum path length from the root. """ if "min_depth" in self.__dict__: return self.__dict__["min_depth"] min_depth = 0 hypernyms = self.hypernyms() if hypernyms: min_depth = 1 + min(h._min_depth() for h in hypernyms) self.__dict__["min_depth"] = min_depth return min_depth
python
def _min_depth(self): if "min_depth" in self.__dict__: return self.__dict__["min_depth"] min_depth = 0 hypernyms = self.hypernyms() if hypernyms: min_depth = 1 + min(h._min_depth() for h in hypernyms) self.__dict__["min_depth"] = min_depth return min_depth
[ "def", "_min_depth", "(", "self", ")", ":", "if", "\"min_depth\"", "in", "self", ".", "__dict__", ":", "return", "self", ".", "__dict__", "[", "\"min_depth\"", "]", "min_depth", "=", "0", "hypernyms", "=", "self", ".", "hypernyms", "(", ")", "if", "hyper...
Finds minimum path length from the root. Notes ----- Internal method. Do not call directly. Returns ------- int Minimum path length from the root.
[ "Finds", "minimum", "path", "length", "from", "the", "root", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L451-L473
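The `__dict__` check above is hand-rolled memoization: the first call computes the depth recursively, later calls return the cached value. A self-contained miniature of the same recursion on a diamond-shaped taxonomy (class and names are illustrative):

class Node:
    def __init__(self, parents=()):
        self.parents = list(parents)
    def min_depth(self):
        # mirrors Synset._min_depth, including the caching trick
        if "md" in self.__dict__:
            return self.__dict__["md"]
        md = 1 + min(p.min_depth() for p in self.parents) if self.parents else 0
        self.__dict__["md"] = md
        return md

root = Node()
a, b = Node([root]), Node([root])
leaf = Node([a, b])
print(leaf.min_depth())   # 2 -- and now cached in leaf.__dict__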
7,678
estnltk/estnltk
estnltk/wordnet/wn.py
Synset.get_related_synsets
def get_related_synsets(self,relation): """Retrieves all the synsets which are related by the given relation. Parameters ---------- relation : str Name of the relation via which the sought synsets are linked. Returns ------- list of Synsets Synsets which are related via `relation`. """ results = [] for relation_candidate in self._raw_synset.internalLinks: if relation_candidate.name == relation: linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept)) relation_candidate.target_concept = linked_synset._raw_synset results.append(linked_synset) return results
python
def get_related_synsets(self,relation): results = [] for relation_candidate in self._raw_synset.internalLinks: if relation_candidate.name == relation: linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept)) relation_candidate.target_concept = linked_synset._raw_synset results.append(linked_synset) return results
[ "def", "get_related_synsets", "(", "self", ",", "relation", ")", ":", "results", "=", "[", "]", "for", "relation_candidate", "in", "self", ".", "_raw_synset", ".", "internalLinks", ":", "if", "relation_candidate", ".", "name", "==", "relation", ":", "linked_sy...
Retrieves all the synsets which are related by the given relation. Parameters ---------- relation : str Name of the relation via which the sought synsets are linked. Returns ------- list of Synsets Synsets which are related via `relation`.
[ "Retrieves", "all", "the", "synsets", "which", "are", "related", "by", "given", "relation", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L543-L564
7,679
estnltk/estnltk
estnltk/wordnet/wn.py
Synset.closure
def closure(self, relation, depth=float('inf')): """Finds all the ancestors of the synset using the provided relation. Parameters ---------- relation : str Name of the relation which is recursively used to fetch the ancestors. depth : float, optional Maximum distance from the synset up to which ancestors are collected. Defaults to infinity. Returns ------- list of Synsets Returns the ancestors of the synset via the given relation. """ ancestors = [] unvisited_ancestors = [(synset,1) for synset in self.get_related_synsets(relation)] while len(unvisited_ancestors) > 0: ancestor_depth = unvisited_ancestors.pop() if ancestor_depth[1] > depth: continue unvisited_ancestors.extend([(synset,ancestor_depth[1]+1) for synset in ancestor_depth[0].get_related_synsets(relation)]) ancestors.append(ancestor_depth[0]) return list(set(ancestors))
python
def closure(self, relation, depth=float('inf')): ancestors = [] unvisited_ancestors = [(synset,1) for synset in self.get_related_synsets(relation)] while len(unvisited_ancestors) > 0: ancestor_depth = unvisited_ancestors.pop() if ancestor_depth[1] > depth: continue unvisited_ancestors.extend([(synset,ancestor_depth[1]+1) for synset in ancestor_depth[0].get_related_synsets(relation)]) ancestors.append(ancestor_depth[0]) return list(set(ancestors))
[ "def", "closure", "(", "self", ",", "relation", ",", "depth", "=", "float", "(", "'inf'", ")", ")", ":", "ancestors", "=", "[", "]", "unvisited_ancestors", "=", "[", "(", "synset", ",", "1", ")", "for", "synset", "in", "self", ".", "get_related_synsets...
Finds all the ancestors of the synset using the provided relation. Parameters ---------- relation : str Name of the relation which is recursively used to fetch the ancestors. depth : float, optional Maximum distance from the synset up to which ancestors are collected. Defaults to infinity. Returns ------- list of Synsets Returns the ancestors of the synset via the given relation.
[ "Finds", "all", "the", "ancestors", "of", "the", "synset", "using", "provided", "relation", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L566-L591
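Because new ancestors are appended to and popped from the end of the list, the traversal above is depth-first, and any ancestor found more than `depth` steps away is skipped together with its own ancestors. Illustrative call (the relation name "has_hyperonym" follows the Estonian WordNet convention, but treat both it and the sense key as assumptions):

from estnltk.wordnet import wn
ss = wn.synset('koer.n.01')                      # illustrative key, as before
near_hypernyms = ss.closure("has_hyperonym", depth=2)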
7,680
estnltk/estnltk
estnltk/wordnet/wn.py
Synset.lch_similarity
def lch_similarity(self, synset): """Calculates Leacock and Chodorow's similarity between the two synsets. Notes ----- Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ). Parameters ---------- synset : Synset Synset from which the similarity is calculated. Returns ------- float Leacock and Chodorow's similarity from `synset`. None, if the synsets are not connected via hypernymy/hyponymy relations or if their parts of speech don't match. """ if self._raw_synset.pos != synset._raw_synset.pos: return None depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos] distance = self._shortest_path_distance(synset) if distance >= 0: return -math.log((distance + 1) / (2.0 * depth)) else: return None
python
def lch_similarity(self, synset): if self._raw_synset.pos != synset._raw_synset.pos: return None depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos] distance = self._shortest_path_distance(synset) if distance >= 0: return -math.log((distance + 1) / (2.0 * depth)) else: return None
[ "def", "lch_similarity", "(", "self", ",", "synset", ")", ":", "if", "self", ".", "_raw_synset", ".", "pos", "!=", "synset", ".", "_raw_synset", ".", "pos", ":", "return", "None", "depth", "=", "MAX_TAXONOMY_DEPTHS", "[", "self", ".", "_raw_synset", ".", ...
Calculates Leacock and Chodorow's similarity between the two synsets. Notes ----- Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ). Parameters ---------- synset : Synset Synset from which the similarity is calculated. Returns ------- float Leacock and Chodorow's similarity from `synset`. None, if the synsets are not connected via hypernymy/hyponymy relations or if their parts of speech don't match.
[ "Calculates", "Leacock", "and", "Chodorow", "s", "similarity", "between", "the", "two", "synsets", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L694-L724
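Plugging numbers into the formula: with a shortest-path distance of 3 between two noun synsets and an assumed maximum noun-taxonomy depth of 13 (the real constant lives in MAX_TAXONOMY_DEPTHS):

import math
distance, max_depth = 3, 13                            # max_depth value is illustrative
print(-math.log((distance + 1) / (2.0 * max_depth)))   # ~1.87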
7,681
estnltk/estnltk
estnltk/syntax/syntax_preprocessing.py
SyntaxPreprocessing.process_vm_json
def process_vm_json( self, json_dict, **kwargs ): ''' Executes the preprocessing pipeline on vabamorf's JSON, given as a dict; Returns a list: lines of analyses in the VISL CG3 input format; ''' mrf_lines = convert_vm_json_to_mrf( json_dict ) return self.process_mrf_lines( mrf_lines, **kwargs )
python
def process_vm_json( self, json_dict, **kwargs ): ''' Executes the preprocessing pipeline on vabamorf's JSON, given as a dict; Returns a list: lines of analyses in the VISL CG3 input format; ''' mrf_lines = convert_vm_json_to_mrf( json_dict ) return self.process_mrf_lines( mrf_lines, **kwargs )
[ "def", "process_vm_json", "(", "self", ",", "json_dict", ",", "*", "*", "kwargs", ")", ":", "mrf_lines", "=", "convert_vm_json_to_mrf", "(", "json_dict", ")", "return", "self", ".", "process_mrf_lines", "(", "mrf_lines", ",", "*", "*", "kwargs", ")" ]
Executes the preprocessing pipeline on vabamorf's JSON, given as a dict; Returns a list: lines of analyses in the VISL CG3 input format;
[ "Executes", "the", "preprocessing", "pipeline", "on", "vabamorf", "s", "JSON", "given", "as", "a", "dict", ";" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L945-L951
7,682
estnltk/estnltk
estnltk/syntax/syntax_preprocessing.py
SyntaxPreprocessing.process_Text
def process_Text( self, text, **kwargs ): ''' Executes the preprocessing pipeline on estnltk's Text object. Returns a list: lines of analyses in the VISL CG3 input format; ''' mrf_lines = convert_Text_to_mrf( text ) return self.process_mrf_lines( mrf_lines, **kwargs )
python
def process_Text( self, text, **kwargs ): ''' Executes the preprocessing pipeline on estnltk's Text object. Returns a list: lines of analyses in the VISL CG3 input format; ''' mrf_lines = convert_Text_to_mrf( text ) return self.process_mrf_lines( mrf_lines, **kwargs )
[ "def", "process_Text", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "mrf_lines", "=", "convert_Text_to_mrf", "(", "text", ")", "return", "self", ".", "process_mrf_lines", "(", "mrf_lines", ",", "*", "*", "kwargs", ")" ]
Executes the preprocessing pipeline on estnltk's Text object. Returns a list: lines of analyses in the VISL CG3 input format;
[ "Executes", "the", "preprocessing", "pipeline", "on", "estnltk", "s", "Text", "object", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L954-L960
7,683
estnltk/estnltk
estnltk/syntax/syntax_preprocessing.py
SyntaxPreprocessing.process_mrf_lines
def process_mrf_lines( self, mrf_lines, **kwargs ): ''' Executes the preprocessing pipeline on mrf_lines. The input should be an analysis of the text in Filosoft's old mrf format; Returns the input list, where elements (tokens/analyses) have been converted into the new format; ''' converted1 = convert_mrf_to_syntax_mrf( mrf_lines, self.fs_to_synt_rules ) converted2 = convert_pronouns( converted1 ) converted3 = remove_duplicate_analyses( converted2, allow_to_delete_all=self.allow_to_remove_all ) converted4 = add_hashtag_info( converted3 ) converted5 = tag_subcat_info( converted4, self.subcat_rules ) converted6 = remove_duplicate_analyses( converted5, allow_to_delete_all=self.allow_to_remove_all ) converted7 = convert_to_cg3_input( converted6 ) return converted7
python
def process_mrf_lines( self, mrf_lines, **kwargs ): ''' Executes the preprocessing pipeline on mrf_lines. The input should be an analysis of the text in Filosoft's old mrf format; Returns the input list, where elements (tokens/analyses) have been converted into the new format; ''' converted1 = convert_mrf_to_syntax_mrf( mrf_lines, self.fs_to_synt_rules ) converted2 = convert_pronouns( converted1 ) converted3 = remove_duplicate_analyses( converted2, allow_to_delete_all=self.allow_to_remove_all ) converted4 = add_hashtag_info( converted3 ) converted5 = tag_subcat_info( converted4, self.subcat_rules ) converted6 = remove_duplicate_analyses( converted5, allow_to_delete_all=self.allow_to_remove_all ) converted7 = convert_to_cg3_input( converted6 ) return converted7
[ "def", "process_mrf_lines", "(", "self", ",", "mrf_lines", ",", "*", "*", "kwargs", ")", ":", "converted1", "=", "convert_mrf_to_syntax_mrf", "(", "mrf_lines", ",", "self", ".", "fs_to_synt_rules", ")", "converted2", "=", "convert_pronouns", "(", "converted1", "...
Executes the preprocessing pipeline on mrf_lines. The input should be an analysis of the text in Filosoft's old mrf format; Returns the input list, where elements (tokens/analyses) have been converted into the new format;
[ "Executes", "the", "preprocessing", "pipeline", "on", "mrf_lines", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L963-L978
7,684
estnltk/estnltk
setup.py
get_sources
def get_sources(src_dir='src', ending='.cpp'): """Function to get a list of files ending with `ending` in `src_dir`.""" return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)]
python
def get_sources(src_dir='src', ending='.cpp'): return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)]
[ "def", "get_sources", "(", "src_dir", "=", "'src'", ",", "ending", "=", "'.cpp'", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "src_dir", ",", "fnm", ")", "for", "fnm", "in", "os", ".", "listdir", "(", "src_dir", ")", "if", "fnm", ...
Function to get a list of files ending with `ending` in `src_dir`.
[ "Function", "to", "get", "a", "list", "of", "files", "ending", "with", "ending", "in", "src_dir", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/setup.py#L14-L16
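A sketch of how such a helper is typically consumed in a setup.py (the extension name below is illustrative):

from setuptools import Extension
cpp_ext = Extension('vabamorf_wrapper',            # illustrative name
                    sources=get_sources('src', '.cpp'))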
7,685
estnltk/estnltk
estnltk/textcleaner.py
TextCleaner.clean
def clean(self, text): """Remove all unwanted characters from text.""" return ''.join([c for c in text if c in self.alphabet])
python
def clean(self, text): return ''.join([c for c in text if c in self.alphabet])
[ "def", "clean", "(", "self", ",", "text", ")", ":", "return", "''", ".", "join", "(", "[", "c", "for", "c", "in", "text", "if", "c", "in", "self", ".", "alphabet", "]", ")" ]
Remove all unwanted characters from text.
[ "Remove", "all", "unwanted", "characters", "from", "text", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L36-L38
7,686
estnltk/estnltk
estnltk/textcleaner.py
TextCleaner.invalid_characters
def invalid_characters(self, text): """Give simple list of invalid characters present in text.""" return ''.join(sorted(set([c for c in text if c not in self.alphabet])))
python
def invalid_characters(self, text): return ''.join(sorted(set([c for c in text if c not in self.alphabet])))
[ "def", "invalid_characters", "(", "self", ",", "text", ")", ":", "return", "''", ".", "join", "(", "sorted", "(", "set", "(", "[", "c", "for", "c", "in", "text", "if", "c", "not", "in", "self", ".", "alphabet", "]", ")", ")", ")" ]
Give simple list of invalid characters present in text.
[ "Give", "simple", "list", "of", "invalid", "characters", "present", "in", "text", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L49-L51
7,687
estnltk/estnltk
estnltk/textcleaner.py
TextCleaner.find_invalid_chars
def find_invalid_chars(self, text, context_size=20): """Find invalid characters in text and store information about the findings. Parameters ---------- context_size: int How many characters to return as the context. """ result = defaultdict(list) for idx, char in enumerate(text): if char not in self.alphabet: start = max(0, idx-context_size) end = min(len(text), idx+context_size) result[char].append(text[start:end]) return result
python
def find_invalid_chars(self, text, context_size=20): result = defaultdict(list) for idx, char in enumerate(text): if char not in self.alphabet: start = max(0, idx-context_size) end = min(len(text), idx+context_size) result[char].append(text[start:end]) return result
[ "def", "find_invalid_chars", "(", "self", ",", "text", ",", "context_size", "=", "20", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "for", "idx", ",", "char", "in", "enumerate", "(", "text", ")", ":", "if", "char", "not", "in", "self", "...
Find invalid characters in text and store information about the findings. Parameters ---------- context_size: int How many characters to return as the context.
[ "Find", "invalid", "characters", "in", "text", "and", "store", "information", "about", "the", "findings", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L53-L69
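Quick demonstration, assuming a TextCleaner instance whose alphabet lacks '§' (the construction details are an assumption here):

bad = cleaner.find_invalid_chars('tere§maailm', context_size=5)
# -> defaultdict(list, {'§': ['tere§maai']}); up to 5 chars of context per side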
7,688
estnltk/estnltk
estnltk/textcleaner.py
TextCleaner.compute_report
def compute_report(self, texts, context_size=10): """Compute statistics of invalid characters on given texts. Parameters ---------- texts: list of str The texts to search for invalid characters. context_size: int How many characters to return as the context. Returns ------- dict of (char -> list of str) Returns a dictionary, where keys are invalid characters. Values are lists containing the context strings of the occurrences. """ result = defaultdict(list) for text in texts: for char, examples in self.find_invalid_chars(text, context_size).items(): result[char].extend(examples) return result
python
def compute_report(self, texts, context_size=10): result = defaultdict(list) for text in texts: for char, examples in self.find_invalid_chars(text, context_size).items(): result[char].extend(examples) return result
[ "def", "compute_report", "(", "self", ",", "texts", ",", "context_size", "=", "10", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "for", "text", "in", "texts", ":", "for", "char", ",", "examples", "in", "self", ".", "find_invalid_chars", "(",...
Compute statistics of invalid characters on given texts. Parameters ---------- texts: list of str The texts to search for invalid characters. context_size: int How many characters to return as the context. Returns ------- dict of (char -> list of str) Returns a dictionary, where keys are invalid characters. Values are lists containing the context strings of the occurrences.
[ "Compute", "statistics", "of", "invalid", "characters", "on", "given", "texts", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L71-L92
7,689
estnltk/estnltk
estnltk/textcleaner.py
TextCleaner.report
def report(self, texts, n_examples=10, context_size=10, f=sys.stdout): """Compute statistics of invalid characters and print them. Parameters ---------- texts: list of str The texts to search for invalid characters. n_examples: int How many examples to display per invalid character. context_size: int How many characters to return as the context. f: file The file to print the report (default is sys.stdout) """ result = list(self.compute_report(texts, context_size).items()) result.sort(key=lambda x: (len(x[1]), x[0]), reverse=True) s = 'Analyzed {0} texts.\n'.format(len(texts)) if (len(texts)) == 0: f.write(s) return if len(result) > 0: s += 'Invalid characters and their counts:\n' for c, examples in result: s += '"{0}"\t{1}\n'.format(c, len(examples)) s += '\n' for c, examples in result: s += 'For character "{0}", found {1} occurrences.\nExamples:\n'.format(c, len(examples)) examples = sample(examples, min(len(examples), n_examples)) for idx, example in enumerate(examples): s += 'example {0}: {1}\n'.format(idx+1, example) s += '\n' f.write(s) else: f.write('All OK\n')
python
def report(self, texts, n_examples=10, context_size=10, f=sys.stdout): result = list(self.compute_report(texts, context_size).items()) result.sort(key=lambda x: (len(x[1]), x[0]), reverse=True) s = 'Analyzed {0} texts.\n'.format(len(texts)) if (len(texts)) == 0: f.write(s) return if len(result) > 0: s += 'Invalid characters and their counts:\n' for c, examples in result: s += '"{0}"\t{1}\n'.format(c, len(examples)) s += '\n' for c, examples in result: s += 'For character "{0}", found {1} occurrences.\nExamples:\n'.format(c, len(examples)) examples = sample(examples, min(len(examples), n_examples)) for idx, example in enumerate(examples): s += 'example {0}: {1}\n'.format(idx+1, example) s += '\n' f.write(s) else: f.write('All OK\n')
[ "def", "report", "(", "self", ",", "texts", ",", "n_examples", "=", "10", ",", "context_size", "=", "10", ",", "f", "=", "sys", ".", "stdout", ")", ":", "result", "=", "list", "(", "self", ".", "compute_report", "(", "texts", ",", "context_size", ")"...
Compute statistics of invalid characters and print them. Parameters ---------- texts: list of str The texts to search for invalid characters. n_examples: int How many examples to display per invalid character. context_size: int How many characters to return as the context. f: file The file to print the report (default is sys.stdout)
[ "Compute", "statistics", "of", "invalid", "characters", "and", "print", "them", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L94-L128
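And the aggregate view over a corpus, writing to stdout by default (same hypothetical cleaner as above):

cleaner.report(['tere§maailm', 'puhas tekst'], n_examples=3, context_size=5)
# prints per-character counts, then up to 3 randomly sampled contexts each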
7,690
estnltk/estnltk
estnltk/syntax/maltparser_support.py
__sort_analyses
def __sort_analyses(sentence): ''' Sorts analyses of all the words in the sentence. This is required for consistency, because by default, analyses are listed in arbitrary order; ''' for word in sentence: if ANALYSIS not in word: raise Exception( '(!) Error: no analysis found from word: '+str(word) ) else: word[ANALYSIS] = sorted(word[ANALYSIS], \ key=lambda x : "_".join( [x[ROOT],x[POSTAG],x[FORM],x[CLITIC]] )) return sentence
python
def __sort_analyses(sentence): ''' Sorts analyses of all the words in the sentence. This is required for consistency, because by default, analyses are listed in arbitrary order; ''' for word in sentence: if ANALYSIS not in word: raise Exception( '(!) Error: no analysis found from word: '+str(word) ) else: word[ANALYSIS] = sorted(word[ANALYSIS], \ key=lambda x : "_".join( [x[ROOT],x[POSTAG],x[FORM],x[CLITIC]] )) return sentence
[ "def", "__sort_analyses", "(", "sentence", ")", ":", "for", "word", "in", "sentence", ":", "if", "ANALYSIS", "not", "in", "word", ":", "raise", "Exception", "(", "'(!) Error: no analysis found from word: '", "+", "str", "(", "word", ")", ")", "else", ":", "w...
Sorts analyses of all the words in the sentence. This is required for consistency, because by default, analyses are listed in arbitrary order;
[ "Sorts", "analysis", "of", "all", "the", "words", "in", "the", "sentence", ".", "This", "is", "required", "for", "consistency", "because", "by", "default", "analyses", "are", "listed", "in", "arbitrary", "order", ";" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/maltparser_support.py#L315-L325
7,691
estnltk/estnltk
estnltk/syntax/maltparser_support.py
augmentTextWithCONLLstr
def augmentTextWithCONLLstr( conll_str_array, text ): ''' Augments given Text object with the information from Maltparser's output. More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token in the Text object; ''' j = 0 for sentence in text.divide( layer=WORDS, by=SENTENCES ): sentence = __sort_analyses(sentence) for i in range(len(sentence)): estnltkToken = sentence[i] maltparserToken = conll_str_array[j] if len( maltparserToken ) > 1: maltParserAnalysis = maltparserToken.split('\t') if estnltkToken[TEXT] == maltParserAnalysis[1]: # Fetch information about the syntactic relation: estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0] estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6] # Fetch the name of the surface syntactic relation estnltkToken[DEPREL] = maltParserAnalysis[7] else: raise Exception("A misalignment between Text and Maltparser's output: ",\ estnltkToken, maltparserToken ) j += 1 j += 1
python
def augmentTextWithCONLLstr( conll_str_array, text ): ''' Augments given Text object with the information from Maltparser's output. More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token in the Text object; ''' j = 0 for sentence in text.divide( layer=WORDS, by=SENTENCES ): sentence = __sort_analyses(sentence) for i in range(len(sentence)): estnltkToken = sentence[i] maltparserToken = conll_str_array[j] if len( maltparserToken ) > 1: maltParserAnalysis = maltparserToken.split('\t') if estnltkToken[TEXT] == maltParserAnalysis[1]: # Fetch information about the syntactic relation: estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0] estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6] # Fetch the name of the surface syntactic relation estnltkToken[DEPREL] = maltParserAnalysis[7] else: raise Exception("A misalignment between Text and Maltparser's output: ",\ estnltkToken, maltparserToken ) j += 1 j += 1
[ "def", "augmentTextWithCONLLstr", "(", "conll_str_array", ",", "text", ")", ":", "j", "=", "0", "for", "sentence", "in", "text", ".", "divide", "(", "layer", "=", "WORDS", ",", "by", "=", "SENTENCES", ")", ":", "sentence", "=", "__sort_analyses", "(", "s...
Augments given Text object with the information from Maltparser's output. More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token in the Text object;
[ "Augments", "given", "Text", "object", "with", "the", "information", "from", "Maltparser", "s", "output", ".", "More", "specifically", "adds", "information", "about", "SYNTAX_LABEL", "SYNTAX_HEAD", "and", "DEPREL", "to", "each", "token", "in", "the", "Text", "ob...
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/maltparser_support.py#L635-L658
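The column indices above follow the 10-field CoNLL-X row that MaltParser emits: field 1 is the token form, field 6 the head index, field 7 the dependency relation, while field 0 (the running ID) is stored as SYNTAX_LABEL. A representative line (values illustrative):

conll_line = "1\tkoer\tkoer\tS\tS\tsg|nom\t2\t@SUBJ\t_\t_"
fields = conll_line.split('\t')
print(fields[1], fields[6], fields[7])   # koer 2 @SUBJ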
7,692
estnltk/estnltk
estnltk/prettyprinter/rules.py
create_rules
def create_rules(aes, value): """Create a Rules instance for a single aesthetic value. Parameter --------- aes: str The name of the aesthetic value: str or list The value associated with any aesthetic """ if isinstance(value, six.string_types): return Rules(aes) else: rules = Rules() for idx, (pattern, css_value) in enumerate(value): rules.add_rule(pattern, '{0}_{1}'.format(aes, idx)) return rules
python
def create_rules(aes, value): if isinstance(value, six.string_types): return Rules(aes) else: rules = Rules() for idx, (pattern, css_value) in enumerate(value): rules.add_rule(pattern, '{0}_{1}'.format(aes, idx)) return rules
[ "def", "create_rules", "(", "aes", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "return", "Rules", "(", "aes", ")", "else", ":", "rules", "=", "Rules", "(", ")", "for", "idx", ",", "(", "patte...
Create a Rules instance for a single aesthetic value. Parameters ---------- aes: str The name of the aesthetic value: str or list The value associated with any aesthetic
[ "Create", "a", "Rules", "instance", "for", "a", "single", "aesthetic", "value", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/rules.py#L51-L67
7,693
estnltk/estnltk
estnltk/prettyprinter/rules.py
Rules.add_rule
def add_rule(self, pattern, css_class): """Add a new rule. Parameters ---------- pattern: str Pattern that is compiled to a regular expression. css_class: str The class that will corresponds to given pattern. """ #print('adding rule <{0}> <{1}>'.format(pattern, css_class)) self.__patterns.append(re.compile(pattern, flags=re.U | re.M)) self.__css_classes.append(css_class)
python
def add_rule(self, pattern, css_class): #print('adding rule <{0}> <{1}>'.format(pattern, css_class)) self.__patterns.append(re.compile(pattern, flags=re.U | re.M)) self.__css_classes.append(css_class)
[ "def", "add_rule", "(", "self", ",", "pattern", ",", "css_class", ")", ":", "#print('adding rule <{0}> <{1}>'.format(pattern, css_class))", "self", ".", "__patterns", ".", "append", "(", "re", ".", "compile", "(", "pattern", ",", "flags", "=", "re", ".", "U", ...
Add a new rule. Parameters ---------- pattern: str Pattern that is compiled to a regular expression. css_class: str The class that will correspond to the given pattern.
[ "Add", "a", "new", "rule", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/rules.py#L25-L37
7,694
estnltk/estnltk
estnltk/ner.py
json_document_to_estner_document
def json_document_to_estner_document(jsondoc): """Convert an estnltk document to an estner document. Parameters ---------- jsondoc: dict Estnltk JSON-style document. Returns ------- estnltk.estner.ner.Document A ner document. """ sentences = [] for json_sent in jsondoc.split_by_sentences(): snt = Sentence() zipped = list(zip( json_sent.word_texts, json_sent.lemmas, json_sent.root_tokens, json_sent.forms, json_sent.endings, json_sent.postags)) json_toks = [{TEXT: text, LEMMA: lemma, ROOT_TOKENS: root_tokens, FORM: form, ENDING: ending, POSTAG: postag} for text, lemma, root_tokens, form, ending, postag in zipped] # add labels, if they are present for tok, word in zip(json_toks, json_sent.words): if LABEL in word: tok[LABEL] = word[LABEL] for json_tok in json_toks: token = json_token_to_estner_token(json_tok) snt.append(token) if snt: for i in range(1, len(snt)): snt[i - 1].next = snt[i] snt[i].prew = snt[i - 1] sentences.append(snt) return Document(sentences=sentences)
python
def json_document_to_estner_document(jsondoc): sentences = [] for json_sent in jsondoc.split_by_sentences(): snt = Sentence() zipped = list(zip( json_sent.word_texts, json_sent.lemmas, json_sent.root_tokens, json_sent.forms, json_sent.endings, json_sent.postags)) json_toks = [{TEXT: text, LEMMA: lemma, ROOT_TOKENS: root_tokens, FORM: form, ENDING: ending, POSTAG: postag} for text, lemma, root_tokens, form, ending, postag in zipped] # add labels, if they are present for tok, word in zip(json_toks, json_sent.words): if LABEL in word: tok[LABEL] = word[LABEL] for json_tok in json_toks: token = json_token_to_estner_token(json_tok) snt.append(token) if snt: for i in range(1, len(snt)): snt[i - 1].next = snt[i] snt[i].prew = snt[i - 1] sentences.append(snt) return Document(sentences=sentences)
[ "def", "json_document_to_estner_document", "(", "jsondoc", ")", ":", "sentences", "=", "[", "]", "for", "json_sent", "in", "jsondoc", ".", "split_by_sentences", "(", ")", ":", "snt", "=", "Sentence", "(", ")", "zipped", "=", "list", "(", "zip", "(", "json_...
Convert an estnltk document to an estner document. Parameters ---------- jsondoc: dict Estnltk JSON-style document. Returns ------- estnltk.estner.ner.Document A ner document.
[ "Convert", "an", "estnltk", "document", "to", "an", "estner", "document", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/ner.py#L64-L101
7,695
estnltk/estnltk
estnltk/ner.py
json_token_to_estner_token
def json_token_to_estner_token(json_token): """Convert a JSON-style word token to an estner token. Parameters ---------- json_token: dict JSON-style token representing a single word; may carry a LABEL entry with the label string. Returns ------- estnltk.estner.ner.Token """ token = Token() word = json_token[TEXT] lemma = word morph = '' label = 'O' ending = json_token[ENDING] root_toks = json_token[ROOT_TOKENS] if isinstance(root_toks[0], list): root_toks = root_toks[0] lemma = '_'.join(root_toks) + ('+' + ending if ending else '') if not lemma: lemma = word morph = '_%s_' % json_token[POSTAG] morph += ' ' + json_token[FORM] if LABEL in json_token: label = json_token[LABEL] return Token(word, lemma, morph, label)
python
def json_token_to_estner_token(json_token): token = Token() word = json_token[TEXT] lemma = word morph = '' label = 'O' ending = json_token[ENDING] root_toks = json_token[ROOT_TOKENS] if isinstance(root_toks[0], list): root_toks = root_toks[0] lemma = '_'.join(root_toks) + ('+' + ending if ending else '') if not lemma: lemma = word morph = '_%s_' % json_token[POSTAG] morph += ' ' + json_token[FORM] if LABEL in json_token: label = json_token[LABEL] return Token(word, lemma, morph, label)
[ "def", "json_token_to_estner_token", "(", "json_token", ")", ":", "token", "=", "Token", "(", ")", "word", "=", "json_token", "[", "TEXT", "]", "lemma", "=", "word", "morph", "=", "''", "label", "=", "'O'", "ending", "=", "json_token", "[", "ENDING", "]"...
Convert a JSON-style word token to an estner token. Parameters ---------- json_token: dict JSON-style token representing a single word; may carry a LABEL entry with the label string. Returns ------- estnltk.estner.ner.Token
[ "Convert", "a", "JSON", "-", "style", "word", "token", "to", "an", "estner", "token", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/ner.py#L104-L134
7,696
estnltk/estnltk
estnltk/ner.py
ModelStorageUtil.makedir
def makedir(self): """ Create model_dir directory """ try: os.makedirs(self.model_dir) except OSError as exception: if exception.errno != errno.EEXIST: raise
python
def makedir(self): try: os.makedirs(self.model_dir) except OSError as exception: if exception.errno != errno.EEXIST: raise
[ "def", "makedir", "(", "self", ")", ":", "try", ":", "os", ".", "makedirs", "(", "self", ".", "model_dir", ")", "except", "OSError", "as", "exception", ":", "if", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise" ]
Create model_dir directory
[ "Create", "model_dir", "directory" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/ner.py#L38-L44
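The try/except is the Python 2-compatible way to tolerate an already existing directory; on Python 3.2+ the standard library does the same in one call:

import os
os.makedirs(model_dir, exist_ok=True)   # model_dir as in the class above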
7,697
estnltk/estnltk
estnltk/ner.py
ModelStorageUtil.copy_settings
def copy_settings(self, settings_module): """ Copy settings module to the model_dir directory """ source = inspect.getsourcefile(settings_module) dest = os.path.join(self.model_dir, 'settings.py') shutil.copyfile(source, dest)
python
def copy_settings(self, settings_module): source = inspect.getsourcefile(settings_module) dest = os.path.join(self.model_dir, 'settings.py') shutil.copyfile(source, dest)
[ "def", "copy_settings", "(", "self", ",", "settings_module", ")", ":", "source", "=", "inspect", ".", "getsourcefile", "(", "settings_module", ")", "dest", "=", "os", ".", "path", ".", "join", "(", "self", ".", "model_dir", ",", "'settings.py'", ")", "shut...
Copy settings module to the model_dir directory
[ "Copy", "settings", "module", "to", "the", "model_dir", "directory" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/ner.py#L46-L50
7,698
estnltk/estnltk
estnltk/ner.py
ModelStorageUtil.load_settings
def load_settings(self): """Load settings module from the model_dir directory.""" mname = 'loaded_module' if six.PY2: import imp return imp.load_source(mname, self.settings_filename) else: import importlib.machinery loader = importlib.machinery.SourceFileLoader(mname, self.settings_filename) return loader.load_module(mname)
python
def load_settings(self): mname = 'loaded_module' if six.PY2: import imp return imp.load_source(mname, self.settings_filename) else: import importlib.machinery loader = importlib.machinery.SourceFileLoader(mname, self.settings_filename) return loader.load_module(mname)
[ "def", "load_settings", "(", "self", ")", ":", "mname", "=", "'loaded_module'", "if", "six", ".", "PY2", ":", "import", "imp", "return", "imp", ".", "load_source", "(", "mname", ",", "self", ".", "settings_filename", ")", "else", ":", "import", "importlib"...
Load settings module from the model_dir directory.
[ "Load", "settings", "module", "from", "the", "model_dir", "directory", "." ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/ner.py#L52-L61
7,699
estnltk/estnltk
estnltk/syntax/vislcg3_syntax.py
VISLCG3Pipeline.check_if_vislcg_is_in_path
def check_if_vislcg_is_in_path( self, vislcg_cmd1 ): ''' Checks whether given vislcg_cmd1 is in system's PATH. Returns True if there is a file named vislcg_cmd1 in the path, otherwise returns False; The idea borrows from: http://stackoverflow.com/a/377028 ''' for path in os.environ["PATH"].split( os.pathsep ): path1 = path.strip('"') file1 = os.path.join(path1, vislcg_cmd1) if os.path.isfile(file1) or os.path.isfile(file1+'.exe'): return True return False
python
def check_if_vislcg_is_in_path( self, vislcg_cmd1 ): ''' Checks whether given vislcg_cmd1 is in system's PATH. Returns True if there is a file named vislcg_cmd1 in the path, otherwise returns False; The idea borrows from: http://stackoverflow.com/a/377028 ''' for path in os.environ["PATH"].split( os.pathsep ): path1 = path.strip('"') file1 = os.path.join(path1, vislcg_cmd1) if os.path.isfile(file1) or os.path.isfile(file1+'.exe'): return True return False
[ "def", "check_if_vislcg_is_in_path", "(", "self", ",", "vislcg_cmd1", ")", ":", "for", "path", "in", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "path1", "=", "path", ".", "strip", "(", "'\"'", ")", ...
Checks whether given vislcg_cmd1 is in system's PATH. Returns True if there is a file named vislcg_cmd1 in the path, otherwise returns False; The idea borrows from: http://stackoverflow.com/a/377028
[ "Checks", "whether", "given", "vislcg_cmd1", "is", "in", "system", "s", "PATH", ".", "Returns", "True", "there", "is", "a", "file", "named", "vislcg_cmd1", "in", "the", "path", "otherwise", "returns", "False", ";" ]
28ae334a68a0673072febc318635f04da0dcc54a
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/vislcg3_syntax.py#L199-L210
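On Python 3.3+ the same PATH scan (including Windows executable extensions) is available in the standard library:

import shutil
vislcg_available = shutil.which('vislcg3') is not None   # command name illustrative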