docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Gets a passage by offset. Args: offset: passage offset Return: the passage with the specified offset, or None if no passage matches
def get_passage(self, offset: int) -> BioCPassage or None:
    """Return the passage at the given offset, or None if absent.

    Args:
        offset: passage offset.

    Returns:
        The passage with the specified offset, or None if no passage
        has that offset.
    """
    for candidate in self.passages:
        if candidate.offset == offset:
            return candidate
    return None
685,968
Add the given file_list to workbench as samples, also add them as nodes. Args: workbench: Instance of Workbench Client. file_list: list of files. labels: labels for the nodes. Returns: A list of md5s.
def add_it(workbench, file_list, labels):
    """Store each file in *file_list* as a workbench sample and add a node for it.

    Args:
        workbench: Instance of Workbench Client.
        file_list: Iterable of file paths; '.DS_Store' entries are skipped.
        labels: Labels to attach to each created node.

    Returns:
        List of md5 digests of the stored samples.
    """
    digests = []
    for path in file_list:
        if path == '.DS_Store':
            continue
        with open(path, 'rb') as pe_file:
            sample_md5 = workbench.store_sample(
                pe_file.read(), os.path.basename(path), 'exe')
            workbench.add_node(sample_md5, sample_md5[:6], labels)
            digests.append(sample_md5)
    return digests
685,973
Compute Jaccard similarities between all the observations in the feature list. Args: feature_list: a list of dictionaries, each having structure as { 'md5' : String, 'features': list of Strings } Returns: list of dictionaries with structure as {'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number}
def jaccard_sims(feature_list):
    """Compute pairwise Jaccard similarities between all observations.

    Fix over the original: the inner loop no longer reuses (shadows) the
    outer loop variable name ``feature_info``; behavior is unchanged, but
    the shadowing made the pairing logic hard to reason about.

    Args:
        feature_list: list of dicts with structure
            {'md5': str, 'features': list of str}.

    Returns:
        list of dicts {'source': md5, 'target': md5, 'sim': float} for
        every ordered pair of distinct observations whose similarity
        exceeds 0.5.
    """
    sim_info_list = []
    for source_info in feature_list:
        md5_source = source_info['md5']
        features_source = source_info['features']
        for target_info in feature_list:
            md5_target = target_info['md5']
            if md5_source == md5_target:
                continue
            sim = jaccard_sim(features_source, target_info['features'])
            if sim > .5:
                sim_info_list.append({'source': md5_source,
                                      'target': md5_target,
                                      'sim': sim})
    return sim_info_list
685,974
Compute similarity between two sets using an overlap-style Jaccard measure (intersection size over the larger set's size). Args: features1: list of PE Symbols. features2: list of PE Symbols. Returns: A float between 0 and 1 (0 when both sets are empty).
def jaccard_sim(features1, features2):
    """Similarity of two feature collections.

    Note: this divides the intersection size by the size of the *larger*
    set (an overlap-style measure), not by the union as in classic
    Jaccard; preserved as-is because callers threshold on these values.

    Args:
        features1: iterable of hashable features.
        features2: iterable of hashable features.

    Returns:
        float in [0, 1]; 0 when both inputs are empty.
    """
    set1, set2 = set(features1), set(features2)
    larger = max(len(set1), len(set2))
    if larger == 0:
        return 0
    return len(set1 & set2) / float(larger)
685,975
Return text with its offset in the document Args: obj: BioCDocument, BioCPassage, or BioCSentence Returns: offset, text
def get_text(obj) -> Tuple[int, str]:
    """Return (offset, text) for a BioC object.

    Args:
        obj: BioCDocument, BioCPassage, or BioCSentence.

    Returns:
        Tuple of the object's offset and its (possibly reconstructed) text.

    Raises:
        ValueError: if sentences or passages overlap.
        TypeError: if obj is not a supported BioC type.
    """
    # Local import (presumably to avoid a circular dependency at module
    # load time -- confirm against bioc package layout).
    from bioc.bioc import BioCDocument, BioCPassage, BioCSentence
    if isinstance(obj, BioCSentence):
        return obj.offset, obj.text
    if isinstance(obj, BioCPassage):
        if obj.text:
            return obj.offset, obj.text
        # Passage has no text of its own: rebuild it from the sentences,
        # padding with spaces so each sentence lands at its own offset
        # relative to the passage start.
        text = ''
        for sentence in obj.sentences:
            try:
                text = pad_char(text, sentence.offset - obj.offset, ' ')
                assert sentence.text, f'BioC sentence has no text: {sentence.offset}'
                text += sentence.text
            except ValueError:
                raise ValueError(f'Overlapping sentences {sentence.offset}')
        return obj.offset, text
    if isinstance(obj, BioCDocument):
        text = ''
        for passage in obj.passages:
            try:
                # NOTE(review): unlike the sentence branch, no pad
                # character is passed here -- presumably pad_char has a
                # suitable default; confirm against pad_char's signature.
                text = pad_char(text, passage.offset)
                text += get_text(passage)[1]
            except ValueError:
                raise ValueError(f'{obj.id}: overlapping passages {passage.offset}')
        return 0, text
    raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
                    f'BioCDocument, BioCPassage, or BioCSentence')
685,978
Get all annotations in document id. Args: obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence docid: document id. If None, all documents level: one of DOCUMENT, PASSAGE, SENTENCE Yields: one annotation
def annotations(obj: BioCCollection or BioCDocument or BioCPassage or BioCSentence,
                docid: str = None, level: int = PASSAGE) \
        -> Generator[BioCAnnotation, None, None]:
    """Yield all annotations at the given level, optionally for one document.

    Args:
        obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence.
        docid: document id; all documents when None.
        level: one of DOCUMENT, PASSAGE, SENTENCE.

    Yields:
        One annotation at a time.

    Raises:
        ValueError: if *level* is not valid for the given object type.
        TypeError: if *obj* is not a supported BioC type.
    """
    if isinstance(obj, BioCCollection):
        for doc in obj.documents:
            if docid is None or docid == doc.id:
                yield from annotations(doc, level=level)
    elif isinstance(obj, BioCDocument):
        if level == DOCUMENT:
            yield from obj.annotations
        elif level in (PASSAGE, SENTENCE):
            for part in obj.passages:
                yield from annotations(part, level=level)
        else:
            raise ValueError('level must be DOCUMENT, PASSAGE, or SENTENCE')
    elif isinstance(obj, BioCPassage):
        if level == PASSAGE:
            yield from obj.annotations
        elif level == SENTENCE:
            for sent in obj.sentences:
                yield from annotations(sent, level=level)
        else:
            raise ValueError('level must be PASSAGE or SENTENCE')
    elif isinstance(obj, BioCSentence):
        if level != SENTENCE:
            raise ValueError('level must be SENTENCE')
        yield from obj.annotations
    else:
        raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
                        f'BioCDocument, BioCPassage, or BioCSentence')
685,994
Get all sentences in document id. Args: obj: BioCCollection, BioCDocument, or BioCPassage Yields: one sentence
def sentences(obj: BioCCollection or BioCDocument or BioCPassage) \
        -> Generator[BioCSentence, None, None]:
    """Yield every sentence contained in *obj*.

    Args:
        obj: BioCCollection, BioCDocument, or BioCPassage.

    Yields:
        One sentence at a time.

    Raises:
        TypeError: if *obj* is not a supported BioC type.
    """
    if isinstance(obj, BioCCollection):
        for doc in obj.documents:
            yield from sentences(doc)
    elif isinstance(obj, BioCDocument):
        for part in obj.passages:
            yield from sentences(part)
    elif isinstance(obj, BioCPassage):
        yield from obj.sentences
    else:
        raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
                        f'BioCDocument, BioCPassage, or BioCSentence')
685,995
Serialize ``collection`` as a BioC formatted stream to ``fp``. Args: collection: the BioC collection fp: a ``.write()``-supporting file-like object pretty_print: enables formatted XML
def dump(collection: BioCCollection, fp, pretty_print: bool = True):
    """Serialize *collection* as a BioC formatted stream to *fp*.

    Args:
        collection: the BioC collection.
        fp: a ``.write()``-supporting file-like object.
        pretty_print: enables formatted XML.
    """
    serialized = dumps(collection, pretty_print)
    fp.write(serialized)
685,997
Serialize ``collection`` to a BioC formatted ``str``. Args: collection: the BioC collection pretty_print: enables formatted XML Returns: a BioC formatted ``str``
def dumps(collection: BioCCollection, pretty_print: bool = True) -> str:
    """Serialize *collection* to a BioC formatted ``str``.

    Args:
        collection: the BioC collection.
        pretty_print: enables formatted XML.

    Returns:
        a BioC formatted ``str`` in the collection's own encoding.
    """
    tree = etree.ElementTree(BioCXMLEncoder().encode(collection))
    raw = etree.tostring(tree,
                         pretty_print=pretty_print,
                         encoding=collection.encoding,
                         standalone=collection.standalone)
    return raw.decode(collection.encoding)
685,998
The read_log method returns a memory efficient generator for rows in a Bro log. Usage: rows = my_bro_reader.read_log(logfile) for row in rows: do something with row Args: logfile: The Bro Log file.
def read_log(self, logfile):
    """Yield rows of a Bro log as dicts, one per line (memory efficient).

    Fix: the original advanced the file with bare ``next(logfile)`` inside
    ``while 1``; at EOF (a log without a ``#close`` footer) that raises
    StopIteration inside a generator, which PEP 479 (Python 3.7+) turns
    into RuntimeError.  Iterating the file directly ends the generator
    cleanly instead.

    Args:
        logfile: The open Bro Log file object.

    Yields:
        dict: One parsed, type-cast row per log line.
    """
    # Make sure we're at the beginning
    logfile.seek(0)
    # First parse the header of the bro log
    field_names, _ = self._parse_bro_header(logfile)
    # Note: hand-rolled split because csv.DictReader on Bro files was
    # doing something weird with generator output that affected zeroRPC
    # and gave 'could not route _zpc_more' errors; the sleep before the
    # final break gives zeroRPC time to finish its messages.
    for raw_line in logfile:
        line = raw_line.strip()
        if line.startswith('#close'):
            time.sleep(.1)  # Give time for zeroRPC to finish messages
            break
        yield self._cast_dict(dict(zip(field_names, line.split(self.delimiter))))
686,037
Internal method that makes sure any dictionary elements are properly cast into the correct types, instead of just treating everything like a string from the csv file. Args: data_dict: dictionary containing bro log data. Returns: Cleaned Data dict.
def _cast_dict(self, data_dict):
    """Cast every value in *data_dict* to its proper type.

    Fix: ``dict.iteritems()`` is Python 2 only and raises AttributeError
    on Python 3; ``items()`` works on both (safe here because iteration
    only reassigns existing keys, never adds or removes any).

    Args:
        data_dict: dictionary containing bro log data (raw strings).

    Returns:
        The same dict with values cast; the potentially very large
        'resp_body_data' field is removed.
    """
    for key, value in data_dict.items():
        data_dict[key] = self._cast_value(value)
    # Fixme: resp_body_data can be very large so removing it for now
    if 'resp_body_data' in data_dict:
        del data_dict['resp_body_data']
    return data_dict
686,039
Internal method that makes sure every value in dictionary is properly cast into the correct types, instead of just treating everything like a string from the csv file. Args: value : The value to be casted Returns: A casted Value.
def _cast_value(self, value):
    """Cast a raw string *value* into datetime, int, float, or str.

    Args:
        value: The value to be cast.

    Returns:
        The value converted by the first cast that accepts it.  Datetime
        conversion is only attempted when ``self.convert_datetimes`` is
        set, and only values at or after the (naive, local-time) epoch
        are accepted as timestamps.
    """
    if self.convert_datetimes:
        try:
            stamp = datetime.datetime.fromtimestamp(float(value))
            # Pre-epoch results fall through to the primitive casts below.
            if stamp >= datetime.datetime(1970, 1, 1):
                return stamp
        except ValueError:
            pass
    # Try conversion to basic types, most specific first.
    for caster in (int, float, str):
        try:
            return caster(value)
        except ValueError:
            continue
    return value
686,040
Initialization for NeoDB indexer. Args: uri: The uri to connect NeoDB. Raises: RuntimeError: When connection to NeoDB failed.
def __init__(self, uri='http://localhost:7474/db/data'):
    """Connect to a Neo4j server for the NeoDB indexer.

    Fix: converted Python 2 ``print`` statements to single-argument
    ``print()`` calls, which parse identically on Python 2 and 3.

    Args:
        uri: The uri to connect NeoDB.

    Raises:
        RuntimeError: When connection to NeoDB failed.
    """
    # Get connection to Neo4j
    try:
        # Open the Neo4j DB and get version (just testing Neo connection)
        self.graph_db = neo4j.GraphDatabaseService(uri)
        version = self.graph_db.neo4j_version
        print('\t- Neo4j GraphDB connected: %s %s' % (str(uri), version))
    except packages.httpstream.http.SocketError:
        print('\t- Neo4j connection failed! Is your Neo4j server running? $ neo4j start')
        raise RuntimeError('Could not connect to Neo4j')
686,052
Add the node with name and labels. Args: node_id: Id for the node. name: Name for the node. labels: Label for the node. Raises: NotImplementedError: When adding labels is not supported.
def add_node(self, node_id, name, labels):
    """Create or fetch the indexed node *node_id* and attach labels.

    Args:
        node_id: Id for the node.
        name: Name for the node.
        labels: Iterable of labels for the node; silently skipped when
            the server/client does not support labels.
    """
    properties = {'node_id': node_id, 'name': name}
    node = self.graph_db.get_or_create_indexed_node(
        'Node', 'node_id', node_id, properties)
    try:
        node.add_labels(*labels)
    except NotImplementedError:
        # Label support is optional in older Neo4j setups.
        pass
686,053
Add a relationship between nodes. Args: source_node_id: Node Id for the source node. target_node_id: Node Id for the target node. rel: Name of the relationship 'contains'
def add_rel(self, source_node_id, target_node_id, rel):
    """Add a relationship of type *rel* between two indexed nodes.

    Fix: converted the Python 2 ``print`` statement to a single-argument
    ``print()`` call, valid on both Python 2 and 3.

    Args:
        source_node_id: Node Id for the source node.
        target_node_id: Node Id for the target node.
        rel: Name of the relationship, e.g. 'contains'.
    """
    # Add the relationship
    n1_ref = self.graph_db.get_indexed_node('Node', 'node_id', source_node_id)
    n2_ref = self.graph_db.get_indexed_node('Node', 'node_id', target_node_id)
    # Sanity check
    if not n1_ref or not n2_ref:
        print('Cannot add relationship between unfound nodes: %s --> %s' % (source_node_id, target_node_id))
        return
    path = neo4j.Path(n1_ref, rel, n2_ref)
    path.get_or_create(self.graph_db)
686,054
Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to a BioCCollection object Args: fp: a file containing a JSON document **kwargs: Returns: BioCCollection: a collection
def load(fp, **kwargs) -> BioCCollection:
    """Deserialize a JSON file object into a BioCCollection.

    Args:
        fp: a ``.read()``-supporting file containing a JSON document.
        **kwargs: forwarded to ``json.load``.

    Returns:
        BioCCollection: a collection.
    """
    return parse_collection(json.load(fp, **kwargs))
686,099
Deserialize s (a str, bytes or bytearray instance containing a JSON document) to a BioCCollection object. Args: s(str): **kwargs: Returns: BioCCollection: a collection
def loads(s: str, **kwargs) -> BioCCollection:
    """Deserialize a JSON string into a BioCCollection.

    Args:
        s(str): a str/bytes/bytearray containing a JSON document.
        **kwargs: forwarded to ``json.loads``.

    Returns:
        BioCCollection: a collection.
    """
    return parse_collection(json.loads(s, **kwargs))
686,100
Initialize the Directory Watcher Args: path: path of the directory to watch
def __init__(self, path):
    """Initialize the Directory Watcher.

    Args:
        path: path of the directory to watch.
    """
    self.path = path
    # Callbacks are wired up later by the caller; None means "not set".
    self.on_create = self.on_modify = self.on_delete = None
    self.jobs = None
686,106
Schedule a timer with the given callable and the interval in seconds. The interval value is also passed to the callable. If the callable takes longer than the timer interval, all accumulated callable's tasks will be cancelled when the timer is cancelled. Args: cb: callable invoked on every tick as ``cb(interval=interval)``; must return an awaitable. interval: tick period in seconds. delay_policy: with CANCEL, unfinished callback tasks are cancelled before the next tick. loop: event loop to schedule the timer on (defaults to the current loop). Returns: You can stop the timer by cancelling the returned task.
def create_timer(cb: Callable[[float], None], interval: float,
                 delay_policy: TimerDelayPolicy = TimerDelayPolicy.DEFAULT,
                 loop: Optional[asyncio.BaseEventLoop] = None) -> asyncio.Task:
    """Schedule *cb* to fire every *interval* seconds.

    Args:
        cb: Called each tick as ``cb(interval=interval)``; its result is
            passed to ``loop.create_task``, so it must return an awaitable.
        interval: Tick period in seconds; also passed to *cb*.
        delay_policy: With ``TimerDelayPolicy.CANCEL``, callback tasks
            still running from earlier ticks are cancelled (and awaited)
            before the next tick fires; otherwise finished tasks are
            simply dropped from tracking.
        loop: Event loop to schedule on; defaults to the current loop.

    Returns:
        The timer task; cancel it to stop the timer (outstanding callback
        tasks are cancelled then).
    """
    if not loop:
        loop = asyncio.get_event_loop()
    async def _timer():
        fired_tasks = []
        try:
            while True:
                if delay_policy == TimerDelayPolicy.CANCEL:
                    # Cancel and await any callback task still running
                    # from previous ticks before firing again.
                    for t in fired_tasks:
                        if not t.done():
                            t.cancel()
                            await t
                    fired_tasks.clear()
                else:
                    # Keep tracking only tasks that are still running.
                    fired_tasks[:] = [t for t in fired_tasks if not t.done()]
                t = loop.create_task(cb(interval=interval))
                fired_tasks.append(t)
                await asyncio.sleep(interval)
        except asyncio.CancelledError:
            # Timer cancelled: cancel all outstanding callback tasks and
            # wait for them to settle.
            for t in fired_tasks:
                t.cancel()
            await asyncio.gather(*fired_tasks)
    return loop.create_task(_timer())
687,828
Expression for an input attribute. An input attribute is an attribute on one of the input ports of the operator invocation. `stream` must have been used to declare this invocation. Args: stream(Stream): Stream the attribute is from. name(str): Name of the attribute. Returns: Expression: Expression representing the input attribute.
def attribute(self, stream, name):
    """Return an Expression for input attribute *name* on *stream*.

    Args:
        stream(Stream): Stream the attribute is from; must have been
            declared as an input of this operator invocation.
        name(str): Name of the attribute.

    Returns:
        Expression: Expression representing the input attribute.

    Raises:
        ValueError: If *stream* is not an input of this operator.
    """
    if stream not in self._inputs:
        raise ValueError("Stream is not an input of this operator.")
    if len(self._inputs) > 1:
        # With multiple inputs the attribute must be qualified by the
        # alias of the port the stream is connected to.
        iport = self._op().inputPorts[self._inputs.index(stream)]
        return Expression('attribute', iport._alias + '.' + name)
    return Expression('attribute', name)
688,767
SPL output port assignment expression. Arguments: stream(Stream): Output stream the assignment is for. value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
def output(self, stream, value):
    """SPL output port assignment expression.

    Arguments:
        stream(Stream): Output stream the assignment is for.
        value(str): SPL expression for the assignment (string, constant,
            or :py:class:`Expression`).

    Returns:
        Expression: Output assignment expression valid in the context of
        this operator.

    Raises:
        ValueError: If *stream* is not an output of this operator.
    """
    if stream not in self.outputs:
        raise ValueError("Stream is not an output of this operator.")
    expr = self.expression(value)
    expr._stream = stream
    return expr
688,768
SPL output port assignment expression. Arguments: value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
def output(self, value):
    """SPL output assignment expression for this source's output stream.

    Arguments:
        value(str): SPL expression (string, constant, or Expression).

    Returns:
        Expression: Output assignment expression bound to ``self.stream``.
    """
    return super(Source, self).output(self.stream, value)
688,771
Expression for an input attribute. An input attribute is an attribute on the input port of the operator invocation. Args: name(str): Name of the attribute. Returns: Expression: Expression representing the input attribute.
def attribute(self, name):
    """Expression for an attribute on this operator's single input port.

    Args:
        name(str): Name of the attribute.

    Returns:
        Expression: Expression representing the input attribute.
    """
    return super(Map, self).attribute(self._inputs[0], name)
688,773
SPL output port assignment expression. Arguments: value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
def output(self, value):
    """SPL output assignment expression for this map's output stream.

    Arguments:
        value(str): SPL expression (string, constant, or Expression).

    Returns:
        Expression: Output assignment expression bound to ``self.stream``.
    """
    return super(Map, self).output(self.stream, value)
688,774
Create an SPL expression. Args: value: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value. Returns: Expression: SPL expression from `value`.
def expression(value):
    """Coerce *value* into an SPL Expression.

    Args:
        value: An Expression, an object providing ``spl_json()``, or a
            raw value.

    Returns:
        Expression: SPL expression from *value*.  Existing Expressions
        are cloned so the same one can be used in multiple contexts.
    """
    if isinstance(value, Expression):
        # Clone the expression to allow it to be used in multiple contexts.
        return Expression(value._type, value._value)
    if hasattr(value, 'spl_json'):
        spl = value.spl_json()
        return Expression(spl['type'], spl['value'])
    return Expression('splexpr', value)
688,777
Convert a datetime to an SPL `Timestamp`. Args: dt(datetime.datetime): Datetime to be converted. machine_id(int): Machine identifier. Returns: Timestamp: Datetime converted to Timestamp.
def from_datetime(dt, machine_id=0):
    """Convert a datetime to an SPL `Timestamp`.

    Args:
        dt(datetime.datetime): Datetime to be converted.
        machine_id(int): Machine identifier.

    Returns:
        Timestamp: *dt* as whole seconds plus nanoseconds since the epoch.
    """
    delta = dt - Timestamp._EPOCH
    whole_seconds = delta.days * 86400 + delta.seconds
    return Timestamp(whole_seconds, delta.microseconds * 1000, machine_id)
688,807
Add an SPL toolkit to a topology. Args: topology(Topology): Topology to include toolkit in. location(str): Location of the toolkit directory.
def add_toolkit(topology, location):
    """Add an SPL toolkit to a topology.

    Fix: input validation now raises TypeError instead of using
    ``assert``, which is silently stripped when Python runs with ``-O``.

    Args:
        topology(Topology): Topology to include toolkit in.
        location(str): Location of the toolkit directory.

    Raises:
        TypeError: If *topology* is not a Topology instance.
    """
    import streamsx.topology.topology
    if not isinstance(topology, streamsx.topology.topology.Topology):
        raise TypeError('topology must be a streamsx.topology.topology.Topology')
    tkinfo = {'root': os.path.abspath(location)}
    topology.graph._spl_toolkits.append(tkinfo)
688,810
Decorator to ignore a Python function. If a Python callable is decorated with ``@spl.ignore`` then function is ignored by ``spl-python-extract.py``. Args: wrapped: Function that will be ignored.
def ignore(wrapped):
    """Decorator marking a Python function to be skipped by
    ``spl-python-extract.py``.

    Args:
        wrapped: Function that will be ignored.
    """
    @functools.wraps(wrapped)
    def _skipped(*args, **kwargs):
        return wrapped(*args, **kwargs)
    # Tag the wrapper so the extractor can recognise and skip it.
    _skipped._splpy_optype = _OperatorType.Ignore
    _skipped._splpy_file = inspect.getsourcefile(wrapped)
    return _skipped
688,833
Cancel this job. Args: force (bool, optional): Forcefully cancel this job. Returns: bool: True if the job was cancelled, otherwise False if an error occurred.
def cancel(self, force=False):
    """Cancel this job.

    Args:
        force (bool, optional): Forcefully cancel this job.

    Returns:
        bool: True if the job was cancelled, otherwise False if an error
        occurred.
    """
    return self.rest_client._sc._delegator._cancel_job(self, force)
688,977
Retrieves jobs running on this resource in its instance. Args: name (str, optional): Only return jobs containing property **name** that matches `name`. `name` can be a regular expression. If `name` is not supplied, then all jobs are returned. Returns: list(Job): A list of jobs matching the given `name`. .. note:: If ``applicationResource`` is `False` an empty list is returned. .. versionadded:: 1.9
def get_jobs(self, name=None):
    """Retrieve jobs running on this resource in its instance.

    Args:
        name (str, optional): Regular expression matched against the job
            ``name`` property; all jobs when omitted.

    Returns:
        list(Job): Matching jobs; an empty list when
        ``applicationResource`` is False.
    """
    if not self.applicationResource:
        return []
    return self._get_elements(self.jobs, 'jobs', Job, None, name)
688,985
Retrieves a job matching the given `id` Args: id (str): Job `id` to match. Returns: Job: Job matching the given `id` Raises: ValueError: No resource matches given `id` or multiple resources matching given `id`
def get_job(self, id):
    """Retrieve a job matching the given *id*.

    Args:
        id (str): Job id to match.

    Returns:
        Job: Job matching the given id.

    Raises:
        ValueError: No resource matches given id or multiple resources
            match given id.
    """
    return self._get_element_by_id(self.jobs, 'jobs', Job, str(id))
688,993
Retrieves application configurations for this instance. Args: name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a regular expression. If `name` is not supplied, then all application configurations are returned. Returns: list(ApplicationConfiguration): A list of application configurations matching the given `name`. .. versionadded 1.12
def get_application_configurations(self, name=None):
    """Retrieve application configurations for this instance.

    Fix: previously this returned None (implicitly) when the REST
    resource does not expose ``applicationConfigurations``; it now
    returns an empty list so the documented list return type holds on
    every path, consistent with ``get_jobs``.

    Args:
        name (str, optional): Regular expression matched against the
            configuration ``name`` property; all configurations when
            omitted.

    Returns:
        list(ApplicationConfiguration): Matching application
        configurations (empty when unsupported).
    """
    if hasattr(self, 'applicationConfigurations'):
        return self._get_elements(self.applicationConfigurations,
                                  'applicationConfigurations',
                                  ApplicationConfiguration, None, name)
    return []
688,995
Create an application configuration. Args: name (str): Name of the application configuration to create. properties (dict): Property name/value pairs for the configuration. description (str, optional): Description of the configuration. Returns: ApplicationConfiguration: The created application configuration. .. versionadded 1.12
def create_application_configuration(self, name, properties, description=None):
    """Create an application configuration.

    Args:
        name (str): Name of the new application configuration.
        properties (dict): Configuration properties.
        description (str, optional): Human-readable description.

    Returns:
        ApplicationConfiguration: The created configuration.

    Raises:
        NotImplementedError: When the instance does not support
            application configurations.
    """
    if not hasattr(self, 'applicationConfigurations'):
        raise NotImplementedError()
    body = ApplicationConfiguration._props(name, properties, description)
    res = self.rest_client.session.post(
        self.applicationConfigurations,
        headers={'Accept': 'application/json'},
        json=body)
    _handle_http_errors(res)
    return ApplicationConfiguration(res.json(), self.rest_client)
688,996
Cancel a running job. Args: job_id (str, optional): Identifier of job to be canceled. job_name (str, optional): Name of job to be canceled. Returns: dict: JSON response for the job cancel operation.
def cancel_job(self, job_id=None, job_name=None):
    """Cancel a running job.

    Args:
        job_id (str, optional): Identifier of job to be canceled.
        job_name (str, optional): Name of job to be canceled.

    Returns:
        dict: JSON response for the job cancel operation.
    """
    return self._delegator.cancel_job(job_id=job_id, job_name=job_name)
689,002
Cancel a running job. Args: job_id (str, optional): Identifier of job to be canceled. job_name (str, optional): Name of job to be canceled. Returns: dict: JSON response for the job cancel operation.
def cancel_job(self, job_id=None, job_name=None):
    """Cancel a running job via the REST jobs endpoint.

    Args:
        job_id (str, optional): Identifier of job to be canceled.
        job_name (str, optional): Name of job to be canceled.

    Returns:
        dict: JSON response for the job cancel operation.
    """
    params = {}
    if job_name is not None:
        params['job_name'] = job_name
    if job_id is not None:
        params['job_id'] = job_id
    res = self.rest_client.session.delete(self._get_url('jobs_path'),
                                          params=params)
    _handle_http_errors(res)
    return res.json()
689,014
Submit this Streams Application Bundle (sab file) to its associated instance. Args: job_config(JobConfig): a job configuration overlay Returns: Job: Resulting job instance.
def submit_job(self, job_config=None):
    """Submit this Streams Application Bundle (sab file) to its
    associated instance.

    Args:
        job_config(JobConfig): a job configuration overlay.

    Returns:
        Job: Resulting job instance.
    """
    job_id = self._delegator._submit_bundle(self, job_config)
    return self._instance.get_job(job_id)
689,019
Is `schema` a common schema. Args: schema: Schema to test. Returns: bool: ``True`` if schema is a common schema, otherwise ``False``.
def is_common(schema):
    """Test whether *schema* is a common schema.

    Args:
        schema: Schema to test (StreamSchema, CommonSchema, or string).

    Returns:
        bool: ``True`` if schema is a common schema, otherwise ``False``.
    """
    if isinstance(schema, StreamSchema):
        return schema.schema() in _SCHEMA_COMMON
    if isinstance(schema, CommonSchema):
        return True
    # NOTE(review): ``basestring`` is Python 2 only -- presumably a
    # py2/py3 compatibility alias is defined elsewhere in this module;
    # confirm, otherwise this raises NameError on Python 3.
    if isinstance(schema, basestring):
        return is_common(StreamSchema(schema))
    return False
689,030
Extend a structured schema by another. For example extending ``tuple<rstring id, timestamp ts, float64 value>`` with ``tuple<float32 score>`` results in ``tuple<rstring id, timestamp ts, float64 value, float32 score>``. Args: schema(StreamSchema): Schema to extend this schema by. Returns: StreamSchema: New schema that is an extension of this schema.
def extend(self, schema):
    """Return a new StreamSchema extending this schema by *schema*.

    For example extending ``tuple<rstring id, timestamp ts>`` with
    ``tuple<float32 score>`` results in
    ``tuple<rstring id, timestamp ts, float32 score>``.

    Args:
        schema(StreamSchema): Schema to extend this schema by.

    Returns:
        StreamSchema: New schema that is an extension of this schema.

    Raises:
        TypeError: For declared SPL types.
    """
    if self._spl_type:
        raise TypeError("Not supported for declared SPL types")
    # Drop this schema's closing '>' and the other schema's 'tuple<'
    # prefix, then splice the attribute lists together.
    combined = self.schema()[:-1] + ',' + schema.schema()[6:]
    return StreamSchema(combined)
689,049
Indicates that the stream is the start of a consistent region. Args: consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region. Returns: Stream: Returns this stream. .. versionadded:: 1.11
def set_consistent(self, consistent_config):
    """Indicate that the stream is the start of a consistent region.

    Args:
        consistent_config(consistent.ConsistentRegionConfig): the
            configuration of the consistent region.

    Returns:
        Stream: Returns this stream.
    """
    # add job control plane if needed
    self.topology._add_job_control_plane()
    self.oport.operator.consistent(consistent_config)
    return self._make_placeable()
689,084
Creates a stream that is a union of this stream and other streams Args: streamSet: a set of Stream objects to merge with this stream Returns: Stream:
def union(self, streamSet):
    """Create a stream that is a union of this stream and other streams.

    Args:
        streamSet: a set of Stream objects to merge with this stream.

    Returns:
        Stream: The merged stream (``self`` when *streamSet* is empty).

    Raises:
        TypeError: If *streamSet* is not a set.
    """
    if not isinstance(streamSet, set):
        raise TypeError("The union operator parameter must be a set object")
    if not streamSet:
        return self
    union_op = self.topology.graph.addOperator("$Union$")
    union_op.addInputPort(outputPort=self.oport)
    for other in streamSet:
        union_op.addInputPort(outputPort=other.oport)
    merged_port = union_op.addOutputPort(schema=self.oport.schema)
    return Stream(self.topology, merged_port)
689,086
Complete the pending stream. Any connections made to :py:attr:`stream` are connected to `stream` once this method returns. Args: stream(Stream): Stream that completes the connection.
def complete(self, stream):
    """Complete the pending stream.

    Any connections made to :py:attr:`stream` are connected to *stream*
    once this method returns.

    Args:
        stream(Stream): Stream that completes the connection.
    """
    assert not self.is_complete()
    self._marker.addInputPort(outputPort=stream.oport)
    self.stream.oport.schema = stream.oport.schema
    # Update the pending schema to the actual schema
    # Any downstream filters that took the reference
    # will be automatically updated to the correct schema
    self._pending_schema._set(self.stream.oport.schema)
    # Mark the operator with the pending stream as
    # a start point for graph traversal
    stream.oport.operator._start_op = True
689,098
Retrieves the Streams REST API URL from the provided credentials. Args: session (:py:class:`requests.Session`): A Requests session object for making REST calls credentials (dict): A dict representation of the credentials. Returns: str: The remote Streams REST API URL.
def _get_rest_api_url_from_creds(session, credentials):
    """Retrieve the Streams REST API URL from the provided credentials.

    Fix: narrowed the bare ``except:`` (which also swallows SystemExit
    and KeyboardInterrupt) to ``except Exception`` before logging and
    re-raising.

    Args:
        session (requests.Session): Session object for making REST calls.
        credentials (dict): Credentials with 'rest_url',
            'resources_path', 'userid' and 'password' keys.

    Returns:
        str: The remote Streams REST API URL.
    """
    resources_url = credentials['rest_url'] + credentials['resources_path']
    try:
        response_raw = session.get(
            resources_url,
            auth=(credentials['userid'], credentials['password']))
        response = response_raw.json()
    except Exception:
        logger.error("Error while retrieving rest REST url from: " + resources_url)
        raise
    response_raw.raise_for_status()
    rest_api_url = response['streams_rest_url'] + '/resources'
    return rest_api_url
689,134
Retrieves the Streams REST API URL from the provided credentials using iam authentication. Args: rest_client (:py:class:`rest_primitives._IAMStreamsRestClient`): A client for making REST calls using IAM authentication credentials (dict): A dict representation of the credentials. Returns: str: The remote Streams REST API URL.
def _get_iam_rest_api_url_from_creds(rest_client, credentials):
    """Retrieve the Streams REST API URL using IAM authentication.

    Fix: the original sliced with the result of ``str.find``; when
    '/instances' is absent, ``find`` returns -1 and ``base[:-1]``
    silently chops the last character off the URL.  That case now raises
    ValueError instead.

    Args:
        rest_client (rest_primitives._IAMStreamsRestClient): Client for
            making REST calls using IAM authentication.
        credentials (dict): A dict representation of the credentials.

    Returns:
        str: The remote Streams REST API URL.

    Raises:
        ValueError: If the 'streams_self' URL has no '/instances' segment.
    """
    res = rest_client.make_request(credentials[_IAMConstants.V2_REST_URL])
    base = res['streams_self']
    end = base.find('/instances')
    if end == -1:
        raise ValueError('Unexpected streams_self URL: ' + base)
    return base[:end] + '/resources'
689,135
Initialize Cleverbot with the given arguments. Arguments: key: The key argument is always required. It is your API key. cs: The cs argument stands for "cleverbot state". It is the encoded state of the conversation so far and includes the whole conversation history up to that point. timeout: How many seconds to wait for the API to respond before giving up and raising an error.
def __init__(self, *args, **kwargs):
    """Initialize Cleverbot and its HTTP session.

    All arguments (``key``, ``cs``, ``timeout``) are forwarded to the
    base class.
    """
    super(Cleverbot, self).__init__(*args, **kwargs)
    self.session = requests.Session()
    # Identify this client (and its project page) to the API.
    self.session.headers.update({
        'User-Agent': 'cleverbot.py/' + __version__ + ' '
                      '(+https://github.com/orlnub123/cleverbot.py)'})
690,158
Make a new conversation. Arguments: name: The key for the dictionary the conversation will be stored as in conversations. If None the conversation will be stored as a list instead. Mixing both types results in an error. **kwargs: Keyword arguments to pass into the new conversation. These accept the same arguments as Cleverbot. Returns: The new conversation.
def conversation(self, name=None, **kwargs):
    """Make a new conversation.

    Arguments:
        name: Key under which the conversation is stored in
            ``conversations``; stored in a list instead when None.
            Mixing both types results in an error.
        **kwargs: Keyword arguments for the new conversation; these
            accept the same arguments as Cleverbot.

    Returns:
        The new conversation.
    """
    new_convo = Conversation(self, **kwargs)
    super().conversation(name, new_convo)
    return new_convo
690,256
Returns tree objects from a sentence Args: line: Sentence to be parsed into a tree Returns: Tree object representing parsed sentence None if parse fails
def parse(self, line):
    """Return a tree object parsed from *line*.

    Args:
        line: Sentence to be parsed into a tree.

    Returns:
        The first parse of the sentence (the root's first child).
    """
    parses = list(self.parser.raw_parse(line))
    return parses[0][0]
691,183
Recursively matches a Tree structure with rules and returns context Args: tree (Tree): Parsed tree structure rules (dict): See match_rules parent_context (dict): Context of parent call Returns: dict: Context matched dictionary of matched rules or None if no match
def match_rules_context(tree, rules, parent_context=None):
    """Recursively match a Tree structure with rules and return context.

    Fix: replaced the mutable default argument ``parent_context={}`` with
    None.  The original only copied the default, so behavior is
    unchanged, but shared mutable defaults are a standing Python hazard.

    Args:
        tree (Tree): Parsed tree structure.
        rules (dict): See match_rules.
        parent_context (dict): Context of parent call.

    Returns:
        dict: Context dictionary of matched rules, or None if no match.
    """
    if parent_context is None:
        parent_context = {}
    for template, match_rules in rules.items():
        context = parent_context.copy()
        if match_template(tree, template, context):
            for key, child_rules in match_rules.items():
                child_context = match_rules_context(context[key], child_rules, context)
                if child_context:
                    context.update(child_context)
                else:
                    return None
            return context
    return None
691,545
Recursively matches a Tree structure with rules and returns all matching contexts Args: tree (Tree): Parsed tree structure rules (dict): See match_rules parent_context (dict): Context of parent call Returns: list: Context dictionaries for all matched rule combinations (empty list if no match)
def match_rules_context_multi(tree, rules, parent_context=None):
    """Recursively match a Tree structure with rules, returning all contexts.

    Fix: replaced the mutable default argument ``parent_context={}`` with
    None (the original only copied it, so behavior is unchanged).

    Args:
        tree (Tree): Parsed tree structure.
        rules (dict): See match_rules.
        parent_context (dict): Context of parent call.

    Returns:
        list: Context dicts for all matched rule combinations (empty
        when nothing matches).
    """
    if parent_context is None:
        parent_context = {}
    all_contexts = []
    for template, match_rules in rules.items():
        context = parent_context.copy()
        if match_template(tree, template, context):
            if not match_rules:
                all_contexts.append(context)
            else:
                child_context_lists = []
                for key, child_rules in match_rules.items():
                    child_context_lists.append(
                        match_rules_context_multi(context[key], child_rules, context))
                all_contexts += cross_context(child_context_lists)
    return all_contexts
691,547
Check if match string matches Tree structure Args: tree (Tree): Parsed Tree structure of a sentence template (str): String template to match. Example: "( S ( NP ) )" Returns: bool: If they match or not
def match_template(tree, template, args=None):
    """Check whether *tree* matches the bracketed *template* string.

    Args:
        tree (Tree): Parsed Tree structure of a sentence.
        template (str): String template to match, e.g. "( S ( NP ) )".
        args (dict): Optional out-parameter updated with captures on a
            successful match.

    Returns:
        bool: Whether the tree matches the template.
    """
    token_stack = get_tokens(template.split())
    captured = {}
    if not match_tokens(tree, token_stack, captured):
        return False
    if args is not None:
        args.update(captured)
    logger.debug('MATCHED: {0}'.format(template))
    return True
691,548
Recursively gets tokens from a match list Args: tokens : List of tokens ['(', 'S', '(', 'NP', ')', ')'] Returns: Stack of tokens
def get_tokens(tokens):
    """Build a nested token structure from a flat bracketed token list.

    Args:
        tokens: List of tokens, e.g. ['(', 'S', '(', 'NP', ')', ')'].

    Returns:
        Nested list of tokens with the outermost bracket pair stripped.

    Raises:
        Exception: On mismatched brackets.
    """
    inner = tokens[1:-1]
    result = []
    depth = 0
    group_start = 0
    for index, token in enumerate(inner):
        if token == '(':
            if depth == 0:
                group_start = index
            depth += 1
        elif token == ')':
            depth -= 1
            if depth < 0:
                raise Exception('Bracket mismatch: ' + str(inner))
            if depth == 0:
                # Recurse into the just-closed bracketed group.
                result.append(get_tokens(inner[group_start:index + 1]))
        elif depth == 0:
            result.append(token)
    if depth != 0:
        raise Exception('Bracket mismatch: ' + str(inner))
    return result
691,550
Get the object in the tree object. Method should remove unnecessary letters and words:: the a/an 's Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "red car")``
def get_object(tree):
    """Return the words of *tree* with determiners and possessives removed.

    Removes "the", "a"/"an" (DT) and "'s" (POS) subtrees.

    Args:
        tree (Tree): Parsed tree structure (or a bare token string).

    Returns:
        Resulting string of the tree, e.g. "red car".
    """
    if not isinstance(tree, Tree):
        return tree
    if tree.label() in ('DT', 'POS'):
        return ''
    parts = [get_object(child) for child in tree]
    return ' '.join(word for word in parts if word)
691,551
Get the exact words of the tree object, joined by spaces in their original case. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")``
def get_raw(tree):
    """Return the exact words in the tree object, joined by spaces.

    Args:
        tree (Tree): Parsed tree structure (or a bare token string).

    Returns:
        Resulting string of tree, e.g. "The red car".
    """
    if isinstance(tree, Tree):
        return ' '.join(get_raw(child) for child in tree)
    return tree
691,552
Sphinx extension to update the rendering context with the feedback form URL. Arguments: app (Sphinx): Application object for the Sphinx process Returns: a dictionary of metadata (http://www.sphinx-doc.org/en/stable/extdev/#extension-metadata)
def setup(app):
    """Sphinx extension entry point: hook the feedback-form context update.

    Arguments:
        app (Sphinx): Application object for the Sphinx process.

    Returns:
        dict: Extension metadata
        (http://www.sphinx-doc.org/en/stable/extdev/#extension-metadata).
    """
    # Sphinx on Python 2 expects byte-string event names.
    event = 'html-page-context' if six.PY3 else b'html-page-context'
    app.connect(event, update_context)
    metadata = {
        'parallel_read_safe': True,
        'parallel_write_safe': True,
        'version': __version__,
    }
    return metadata
691,940
Work with the AccessPolicy in a SystemMetadata PyXB object. Args: access_pyxb : AccessPolicy PyXB object The AccessPolicy to modify. read_only: bool Do not update the wrapped AccessPolicy. When only a single AccessPolicy operation is needed, there's no need to use this context manager. Instead, use the generated context manager wrappers.
def wrap(access_pyxb, read_only=False):
    """Work with the AccessPolicy in a SystemMetadata PyXB object.

    Args:
        access_pyxb : AccessPolicy PyXB object
            The AccessPolicy to modify.
        read_only: bool
            Do not update the wrapped AccessPolicy.

    Yields:
        AccessPolicyWrapper around *access_pyxb*.
    """
    w = AccessPolicyWrapper(access_pyxb)
    yield w
    # NOTE(review): the yield is not wrapped in try/finally, so if the
    # with-body raises, the policy below is never written back --
    # presumably intentional (don't persist changes on error); confirm.
    if not read_only:
        w.get_normalized_pyxb()
692,692
Add a permission for a subject. Args: subj_str : str Subject for which to add permission(s) perm_str : str Permission to add. Implicitly adds all lower permissions. E.g., ``write`` will also add ``read``.
def add_perm(self, subj_str, perm_str):
    """Add a permission for a subject.

    Args:
        subj_str : str
            Subject for which to add permission(s).
        perm_str : str
            Permission to add.
    """
    self._assert_valid_permission(perm_str)
    subjects = self._perm_dict.setdefault(perm_str, set())
    subjects.add(subj_str)
692,703
Remove permission from a subject. Args: subj_str : str Subject for which to remove permission(s) perm_str : str Permission to remove. Implicitly removes all higher permissions. E.g., ``write`` will also remove ``changePermission`` if previously granted.
def remove_perm(self, subj_str, perm_str):
    """Remove a permission (and all higher ones) from a subject.

    Fix: the loop variable no longer reuses (shadows) the ``perm_str``
    parameter name; behavior is unchanged, but the shadowing obscured
    that every equal-or-higher permission is processed.

    Args:
        subj_str : str
            Subject for which to remove permission(s).
        perm_str : str
            Permission to remove.  Implicitly removes all higher
            permissions; e.g. ``write`` also removes ``changePermission``
            if previously granted.
    """
    self._assert_valid_permission(perm_str)
    for higher_perm_str in self._equal_or_higher_perm(perm_str):
        self._perm_dict.setdefault(higher_perm_str, set()).discard(subj_str)
692,704
Initialize the `Wilson` class. Parameters: - `wcdict`: dictionary of Wilson coefficient values at the input scale. The keys must exist as Wilson coefficients in the WCxf basis file. The values must be real or complex numbers (not dictionaries with key 'Re'/'Im'!) - `scale`: input scale in GeV - `eft`: input EFT - `basis`: input basis
def __init__(self, wcdict, scale, eft, basis):
    """Initialize the `Wilson` class.

    Parameters:
        wcdict: dict of Wilson coefficient values at the input scale,
            keyed by coefficient names from the WCxf basis file; values
            are real or complex numbers (not {'Re': .., 'Im': ..} dicts).
        scale: input scale in GeV.
        eft: input EFT.
        basis: input basis.
    """
    super().__init__()
    values = wcxf.WC.dict2values(wcdict)
    self.wc = wcxf.WC(eft=eft, basis=basis, scale=scale, values=values)
    self.wc.validate()
    self._cache = {}
692,938
Initialize. Parameters: - fun: function of the scale that is expected to return a dictionary with the RGE solution and to accept vectorized input. - scale_min, scale_max: lower and upper boundaries of the scale
def __init__(self, fun, scale_min, scale_max):
    """Store the RGE solution function and its scale boundaries.

    Parameters:
        fun: function of the scale expected to return a dict with the
            RGE solution; must accept vectorized input.
        scale_min: lower boundary of the scale.
        scale_max: upper boundary of the scale.
    """
    self.fun = fun
    self.scale_min, self.scale_max = scale_min, scale_max
692,946
Plot the RG evolution of parameter `key`. Parameters: - part, scale, steps: see `plotdata` - legend: boolean, show the legend (default: True) - plotargs: dictionary of arguments to be passed to plt.plot
def plot(self, key, part='re', scale='log', steps=50, legend=True, plotargs=None):
    """Plot the RG evolution of parameter *key*.

    Fix: the mutable default ``plotargs={}`` was replaced with None (the
    dict was only read, so this is behavior-preserving, but mutable
    defaults are a standing Python hazard).

    Parameters:
        key: parameter to plot.
        part, scale, steps: see `plotdata`.
        legend: boolean, show the legend (default: True).
        plotargs: dict of extra arguments passed to plt.plot.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Please install matplotlib if you want to use the plot method")
    if plotargs is None:
        plotargs = {}
    pdat = self.plotdata(key, part=part, scale=scale, steps=steps)
    plt.plot(*pdat, label=key, **plotargs)
    if scale == 'log':
        plt.xscale('log')
    if legend:
        plt.legend()
692,948
Evolve the Wilson coefficients to the scale `scale_out`. Parameters: - scale_out: output scale - sectors: optional. If provided, must be a tuple of strings corresponding to WCxf sector names. Only Wilson coefficients belonging to these sectors will be present in the output. Returns an instance of `wcxf.WC`.
def run(self, scale_out, sectors='all'):
    """Evolve the Wilson coefficients to the scale *scale_out*.

    Parameters:
        scale_out: output scale.
        sectors: optional tuple of WCxf sector names; only Wilson
            coefficients belonging to these sectors appear in the output.

    Returns:
        An instance of `wcxf.WC` at the output scale.
    """
    C_out = self._run_dict(scale_out, sectors=sectors)
    all_wcs = set(wcxf.Basis[self.eft, 'JMS'].all_wcs)  # to speed up lookup
    nonzero = {name: coeff for name, coeff in C_out.items()
               if coeff != 0 and name in all_wcs}
    return wcxf.WC(eft=self.eft, basis='JMS', scale=scale_out,
                   values=wcxf.WC.dict2values(nonzero))
692,958
Serialize to XML ``bytes`` with prolog. Args: encoding: str Encoding to use for XML doc bytes xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: bytes: XML holding a DataONEError based type.
def serialize_to_transport(self, encoding='utf-8', xslt_url=None):
    """Serialize to XML ``bytes`` with prolog.

    Fix: encoding validation now raises ValueError instead of using
    ``assert``, which is silently stripped when Python runs with ``-O``.

    Args:
        encoding: str
            Encoding to use for XML doc bytes (only UTF-8 is supported).
        xslt_url: str
            If specified, add a processing instruction to the XML doc
            that specifies the download location for an XSLT stylesheet.

    Returns:
        bytes: XML holding a DataONEError based type.

    Raises:
        ValueError: If *encoding* is not UTF-8.
    """
    if encoding not in ('utf-8', 'UTF-8'):
        raise ValueError('Only UTF-8 encoding is supported, not: ' + str(encoding))
    dataone_exception_pyxb = self.get_pyxb()
    return d1_common.xml.serialize_for_transport(
        dataone_exception_pyxb, xslt_url=xslt_url
    )
693,034
Normalize a ReplicationPolicy PyXB type in place. The preferred and blocked lists are sorted alphabetically. As blocked nodes override preferred nodes, and any node present in both lists is removed from the preferred list. Args: rp_pyxb : ReplicationPolicy PyXB object The object will be normalized in place.
def normalize(rp_pyxb):
    """Normalize a ReplicationPolicy PyXB object in place.

    Blocked nodes override preferred nodes: any node present in both lists
    is removed from the preferred list. Both lists are then sorted
    alphabetically.

    Args:
        rp_pyxb: ReplicationPolicy PyXB object. Modified in place.
    """
    # noinspection PyMissingOrEmptyDocstring
    def sort(r, a):
        # Sort the node list selected by short attribute name `a` in place.
        d1_common.xml.sort_value_list_pyxb(_get_attr_or_list(r, a))

    # Blocked wins over preferred: drop every blocked node from the
    # preferred list. NOTE(review): a plain Python set is assigned to the
    # PyXB attribute here; presumably PyXB coerces iterables — confirm.
    rp_pyxb.preferredMemberNode = set(_get_attr_or_list(rp_pyxb, 'pref')) - set(
        _get_attr_or_list(rp_pyxb, 'block')
    )
    sort(rp_pyxb, 'block')
    sort(rp_pyxb, 'pref')
693,144
Check if two ReplicationPolicy XML docs are semantically equivalent. The ReplicationPolicy XML docs are normalized before comparison. Args: a_xml, b_xml: ReplicationPolicy XML docs to compare Returns: bool: ``True`` if the resulting policies for the two objects are semantically equivalent.
def are_equivalent_xml(a_xml, b_xml):
    """Check if two ReplicationPolicy XML docs are semantically equivalent.

    The docs are deserialized and compared via ``are_equivalent_pyxb()``,
    which normalizes them before comparison.

    Args:
        a_xml, b_xml: ReplicationPolicy XML docs to compare.

    Returns:
        bool: ``True`` if the two policies are semantically equivalent.
    """
    a_pyxb = d1_common.xml.deserialize(a_xml)
    b_pyxb = d1_common.xml.deserialize(b_xml)
    return are_equivalent_pyxb(a_pyxb, b_pyxb)
693,145
Convert ReplicationPolicy PyXB object to a normalized dict. Args: rp_pyxb: ReplicationPolicy to convert. Returns: dict : Replication Policy as normalized dict. Example:: { 'allowed': True, 'num': 3, 'block': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'pref': {'urn:node:NODE4', 'urn:node:NODE5'}, }
def pyxb_to_dict(rp_pyxb):
    """Convert a ReplicationPolicy PyXB object to a normalized dict.

    Args:
        rp_pyxb: ReplicationPolicy to convert.

    Returns:
        dict: Replication Policy as normalized dict. Example::

            {
                'allowed': True,
                'num': 3,
                'block': {'urn:node:NODE1', 'urn:node:NODE2'},
                'pref': {'urn:node:NODE4', 'urn:node:NODE5'},
            }
    """
    allowed_flag = bool(_get_attr_or_list(rp_pyxb, 'allowed'))
    return {
        'allowed': allowed_flag,
        'num': _get_as_int(rp_pyxb),
        'block': _get_as_set(rp_pyxb, 'block'),
        'pref': _get_as_set(rp_pyxb, 'pref'),
    }
693,146
Convert dict to ReplicationPolicy PyXB object. Args: rp_dict: Native Python structure representing a Replication Policy, with keys 'allowed', 'num', 'block' and 'pref'. Example:: { 'allowed': True, 'num': 3, 'block': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'pref': {'urn:node:NODE4', 'urn:node:NODE5'}, } Returns: ReplicationPolicy PyXB object.
def dict_to_pyxb(rp_dict):
    """Convert a dict to a normalized ReplicationPolicy PyXB object.

    Args:
        rp_dict: dict with keys ``'allowed'``, ``'num'``, ``'block'`` and
            ``'pref'`` (as produced by ``pyxb_to_dict()``).

    Returns:
        ReplicationPolicy PyXB object, normalized via ``normalize()``.
    """
    rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy()
    # Map each dict key onto the corresponding PyXB attribute.
    for attr_name, dict_key in (
        ('replicationAllowed', 'allowed'),
        ('numberReplicas', 'num'),
        ('blockedMemberNode', 'block'),
        ('preferredMemberNode', 'pref'),
    ):
        setattr(rp_pyxb, attr_name, rp_dict[dict_key])
    normalize(rp_pyxb)
    return rp_pyxb
693,147
See Also: listFormatsResponse() Args: vendorSpecific: Returns:
def listFormats(self, vendorSpecific=None):
    """See Also: listFormatsResponse()

    Args:
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        ObjectFormatList PyXB object deserialized from the response.
    """
    return self._read_dataone_type_response(
        self.listFormatsResponse(vendorSpecific), 'ObjectFormatList'
    )
693,159
See Also: getFormatResponse() Args: formatId: vendorSpecific: Returns:
def getFormat(self, formatId, vendorSpecific=None):
    """See Also: getFormatResponse()

    Args:
        formatId: format identifier to look up.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        ObjectFormat PyXB object deserialized from the response.
    """
    return self._read_dataone_type_response(
        self.getFormatResponse(formatId, vendorSpecific), 'ObjectFormat'
    )
693,160
CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][, count]) → Log https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in d1_client.baseclient.py. CNCore.reserveIdentifier(session, pid) → Identifier https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier Args: pid: vendorSpecific: Returns:
def reserveIdentifierResponse(self, pid, vendorSpecific=None):
    """CNCore.reserveIdentifier(session, pid) → Identifier.

    Args:
        pid: identifier to reserve.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the POST to /reserve/{pid}.
    """
    return self.POST(['reserve', pid], fields={'pid': pid}, headers=vendorSpecific)
693,161
See Also: reserveIdentifierResponse() Args: pid: vendorSpecific: Returns:
def reserveIdentifier(self, pid, vendorSpecific=None):
    """See Also: reserveIdentifierResponse()

    Args:
        pid: identifier to reserve.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Identifier PyXB object deserialized from the response.
    """
    response = self.reserveIdentifierResponse(pid, vendorSpecific)
    # Bug fix: `vendorSpecific` was previously passed as the third positional
    # argument to _read_dataone_type_response(), where the sibling resolve()
    # wrapper shows that slot is `response_is_303_redirect`. A truthy headers
    # dict would wrongly enable 303-redirect handling. Headers belong only in
    # the request above.
    return self._read_dataone_type_response(response, 'Identifier')
693,162
See Also: listChecksumAlgorithmsResponse() Args: vendorSpecific: Returns:
def listChecksumAlgorithms(self, vendorSpecific=None):
    """See Also: listChecksumAlgorithmsResponse()

    Args:
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        ChecksumAlgorithmList PyXB object deserialized from the response.
    """
    return self._read_dataone_type_response(
        self.listChecksumAlgorithmsResponse(vendorSpecific), 'ChecksumAlgorithmList'
    )
693,163
CNCore.setObsoletedBy(session, pid, obsoletedByPid, serialVersion) → boolean https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.setObsoletedBy. Args: pid: obsoletedByPid: serialVersion: vendorSpecific: Returns:
def setObsoletedByResponse(
    self, pid, obsoletedByPid, serialVersion, vendorSpecific=None
):
    """CNCore.setObsoletedBy(session, pid, obsoletedByPid, serialVersion) → boolean.

    Args:
        pid: identifier of the object being obsoleted.
        obsoletedByPid: identifier of the obsoleting object.
        serialVersion: current serial version of the system metadata.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the PUT to /obsoletedBy/{pid}.
    """
    fields = {
        'obsoletedByPid': obsoletedByPid,
        'serialVersion': str(serialVersion),
    }
    return self.PUT(['obsoletedBy', pid], fields=fields, headers=vendorSpecific)
693,164
See Also: setObsoletedByResponse() Args: pid: obsoletedByPid: serialVersion: vendorSpecific: Returns:
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
    """See Also: setObsoletedByResponse()

    Args:
        pid: identifier of the object being obsoleted.
        obsoletedByPid: identifier of the obsoleting object.
        serialVersion: current serial version of the system metadata.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    resp = self.setObsoletedByResponse(
        pid, obsoletedByPid, serialVersion, vendorSpecific
    )
    return self._read_boolean_response(resp)
693,165
See Also: listNodesResponse() Args: vendorSpecific: Returns:
def listNodes(self, vendorSpecific=None):
    """See Also: listNodesResponse()

    Args:
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        NodeList PyXB object deserialized from the response.
    """
    return self._read_dataone_type_response(
        self.listNodesResponse(vendorSpecific), 'NodeList'
    )
693,166
CNCore.registerSystemMetadata(session, pid, sysmeta) → Identifier CN INTERNAL. CNCore.hasReservation(session, pid) → boolean https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.hasReservation Args: pid: subject: vendorSpecific: Returns:
def hasReservationResponse(self, pid, subject, vendorSpecific=None):
    """CNCore.hasReservation(session, pid) → boolean.

    Args:
        pid: identifier whose reservation is checked.
        subject: subject that may hold the reservation.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the GET on /reserve/{pid}/{subject}.
    """
    url_path = ['reserve', pid, subject]
    return self.GET(url_path, headers=vendorSpecific)
693,167
See Also: hasReservationResponse() Args: pid: subject: vendorSpecific: Returns:
def hasReservation(self, pid, subject, vendorSpecific=None):
    """See Also: hasReservationResponse()

    Args:
        pid: identifier whose reservation is checked.
        subject: subject that may hold the reservation.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` if the reservation exists (404 maps to ``False``).
    """
    return self._read_boolean_404_response(
        self.hasReservationResponse(pid, subject, vendorSpecific)
    )
693,168
See Also: resolveResponse() Args: pid: vendorSpecific: Returns:
def resolve(self, pid, vendorSpecific=None):
    """See Also: resolveResponse()

    Args:
        pid: identifier to resolve.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        ObjectLocationList PyXB object. The CN answers with a 303 redirect,
        which the reader is told to expect.
    """
    resp = self.resolveResponse(pid, vendorSpecific)
    return self._read_dataone_type_response(
        resp, 'ObjectLocationList', response_is_303_redirect=True
    )
693,169
CNRead.search(session, queryType, query) → ObjectList https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNRead.search. Args: queryType: query: vendorSpecific: **kwargs: Returns:
def searchResponse(self, queryType, query, vendorSpecific=None, **kwargs):
    """CNRead.search(session, queryType, query) → ObjectList.

    Args:
        queryType: name of the search engine to use.
        query: query string.
        vendorSpecific: dict of vendor-specific request headers, or None.
        **kwargs: additional URL query parameters.

    Returns:
        Raw response from the GET on /search/{queryType}/{query}.
    """
    url_path = ['search', queryType, query]
    return self.GET(url_path, headers=vendorSpecific, query=kwargs)
693,170
See Also: searchResponse() Args: queryType: query: vendorSpecific: **kwargs: Returns:
def search(self, queryType, query=None, vendorSpecific=None, **kwargs):
    """See Also: searchResponse()

    Args:
        queryType: name of the search engine to use.
        query: query string.
        vendorSpecific: dict of vendor-specific request headers, or None.
        **kwargs: additional URL query parameters.

    Returns:
        ObjectList PyXB object deserialized from the response.
    """
    return self._read_dataone_type_response(
        self.searchResponse(queryType, query, vendorSpecific, **kwargs),
        'ObjectList',
    )
693,171
CNRead.query(session, queryEngine, query) → OctetStream https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNRead.query. Args: queryEngine: query: vendorSpecific: **kwargs: Returns:
def queryResponse(self, queryEngine, query=None, vendorSpecific=None, **kwargs):
    """CNRead.query(session, queryEngine, query) → OctetStream.

    Args:
        queryEngine: name of the query engine to use.
        query: query string.
        vendorSpecific: dict of vendor-specific request headers, or None.
        **kwargs: additional URL query parameters.

    Returns:
        Raw response from the GET on /query/{queryEngine}/{query}.
    """
    url_path = ['query', queryEngine, query]
    return self.GET(url_path, headers=vendorSpecific, query=kwargs)
693,172
See Also: queryResponse() Args: queryEngine: query: vendorSpecific: **kwargs: Returns:
def query(self, queryEngine, query=None, vendorSpecific=None, **kwargs):
    """See Also: queryResponse()

    Args:
        queryEngine: name of the query engine to use.
        query: query string.
        vendorSpecific: dict of vendor-specific request headers, or None.
        **kwargs: additional URL query parameters.

    Returns:
        Stream wrapper around the raw response body.
    """
    return self._read_stream_response(
        self.queryResponse(queryEngine, query, vendorSpecific, **kwargs)
    )
693,173
CNAuthorization.setRightsHolder(session, pid, userId, serialVersion) → Identifier https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setRightsHolder. Args: pid: userId: serialVersion: vendorSpecific: Returns:
def setRightsHolderResponse(self, pid, userId, serialVersion, vendorSpecific=None):
    """CNAuthorization.setRightsHolder(session, pid, userId, serialVersion) → Identifier.

    Args:
        pid: identifier of the object whose rights holder is changed.
        userId: subject of the new rights holder.
        serialVersion: current serial version of the system metadata.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the PUT to /owner/{pid}.
    """
    fields = {'userId': userId, 'serialVersion': str(serialVersion)}
    return self.PUT(['owner', pid], headers=vendorSpecific, fields=fields)
693,174
See Also: setRightsHolderResponse() Args: pid: userId: serialVersion: vendorSpecific: Returns:
def setRightsHolder(self, pid, userId, serialVersion, vendorSpecific=None):
    """See Also: setRightsHolderResponse()

    Args:
        pid: identifier of the object whose rights holder is changed.
        userId: subject of the new rights holder.
        serialVersion: current serial version of the system metadata.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    resp = self.setRightsHolderResponse(pid, userId, serialVersion, vendorSpecific)
    return self._read_boolean_response(resp)
693,175
CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) → boolean https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy. Args: pid: accessPolicy: serialVersion: vendorSpecific: Returns:
def setAccessPolicyResponse(
    self, pid, accessPolicy, serialVersion, vendorSpecific=None
):
    """CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) → boolean.

    Args:
        pid: identifier of the object whose access policy is replaced.
        accessPolicy: AccessPolicy PyXB object; serialized to XML here.
        serialVersion: current serial version of the system metadata.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the PUT to /accessRules/{pid}.
    """
    fields = {
        'serialVersion': str(serialVersion),
        'accessPolicy': ('accessPolicy.xml', accessPolicy.toxml('utf-8')),
    }
    return self.PUT(['accessRules', pid], fields=fields, headers=vendorSpecific)
693,176
See Also: setAccessPolicyResponse() Args: pid: accessPolicy: serialVersion: vendorSpecific: Returns:
def setAccessPolicy(self, pid, accessPolicy, serialVersion, vendorSpecific=None):
    """See Also: setAccessPolicyResponse()

    Args:
        pid: identifier of the object whose access policy is replaced.
        accessPolicy: AccessPolicy PyXB object.
        serialVersion: current serial version of the system metadata.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    resp = self.setAccessPolicyResponse(
        pid, accessPolicy, serialVersion, vendorSpecific
    )
    return self._read_boolean_response(resp)
693,177
CNIdentity.registerAccount(session, person) → Subject https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.registerAccount. Args: person: vendorSpecific: Returns:
def registerAccountResponse(self, person, vendorSpecific=None):
    """CNIdentity.registerAccount(session, person) → Subject.

    Args:
        person: Person PyXB object; serialized to XML here.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the POST to /accounts.
    """
    person_doc = ('person.xml', person.toxml('utf-8'))
    return self.POST('accounts', fields={'person': person_doc}, headers=vendorSpecific)
693,178
See Also: registerAccountResponse() Args: person: vendorSpecific: Returns:
def registerAccount(self, person, vendorSpecific=None):
    """See Also: registerAccountResponse()

    Args:
        person: Person PyXB object.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.registerAccountResponse(person, vendorSpecific)
    )
693,179
CNIdentity.updateAccount(session, person) → Subject https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.updateAccount. Args: subject: person: vendorSpecific: Returns:
def updateAccountResponse(self, subject, person, vendorSpecific=None):
    """CNIdentity.updateAccount(session, person) → Subject.

    Args:
        subject: subject of the account to update.
        person: Person PyXB object; serialized to XML here.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the PUT to /accounts/{subject}.
    """
    person_doc = ('person.xml', person.toxml('utf-8'))
    return self.PUT(
        ['accounts', subject], fields={'person': person_doc}, headers=vendorSpecific
    )
693,180
See Also: updateAccountResponse() Args: subject: person: vendorSpecific: Returns:
def updateAccount(self, subject, person, vendorSpecific=None):
    """See Also: updateAccountResponse()

    Args:
        subject: subject of the account to update.
        person: Person PyXB object.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.updateAccountResponse(subject, person, vendorSpecific)
    )
693,181
See Also: verifyAccountResponse() Args: subject: vendorSpecific: Returns:
def verifyAccount(self, subject, vendorSpecific=None):
    """See Also: verifyAccountResponse()

    Args:
        subject: subject of the account to verify.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.verifyAccountResponse(subject, vendorSpecific)
    )
693,182
See Also: getSubjectInfoResponse() Args: subject: vendorSpecific: Returns:
def getSubjectInfo(self, subject, vendorSpecific=None):
    """See Also: getSubjectInfoResponse()

    Args:
        subject: subject whose information is retrieved.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        SubjectInfo PyXB object deserialized from the response.
    """
    return self._read_dataone_type_response(
        self.getSubjectInfoResponse(subject, vendorSpecific), 'SubjectInfo'
    )
693,183
CNIdentity.listSubjects(session, query, status, start, count) → SubjectInfo https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.listSubjects. Args: query: status: start: count: vendorSpecific: Returns:
def listSubjectsResponse(
    self, query, status=None, start=None, count=None, vendorSpecific=None
):
    """CNIdentity.listSubjects(session, query, status, start, count) → SubjectInfo.

    Args:
        query: query string used to filter subjects.
        status: optional status filter.
        start: optional slice start index.
        count: optional slice size.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the GET on /accounts.
    """
    # Parameter order in the dict is preserved in the generated URL.
    url_query = {'status': status, 'start': start, 'count': count, 'query': query}
    return self.GET('accounts', query=url_query, headers=vendorSpecific)
693,184
See Also: listSubjectsResponse() Args: query: status: start: count: vendorSpecific: Returns:
def listSubjects(
    self, query, status=None, start=None, count=None, vendorSpecific=None
):
    """See Also: listSubjectsResponse()

    Args:
        query: query string used to filter subjects.
        status: optional status filter.
        start: optional slice start index.
        count: optional slice size.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        SubjectInfo PyXB object deserialized from the response.
    """
    resp = self.listSubjectsResponse(query, status, start, count, vendorSpecific)
    return self._read_dataone_type_response(resp, 'SubjectInfo')
693,185
CNIdentity.mapIdentity(session, subject) → boolean https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.mapIdentity. Args: primarySubject: secondarySubject: vendorSpecific: Returns:
def mapIdentityResponse(
    self, primarySubject, secondarySubject, vendorSpecific=None
):
    """CNIdentity.mapIdentity(session, subject) → boolean.

    Args:
        primarySubject: Subject PyXB object; serialized to XML here.
        secondarySubject: Subject PyXB object; serialized to XML here.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the POST to /accounts/map.
    """
    fields = {
        'primarySubject': primarySubject.toxml('utf-8'),
        'secondarySubject': secondarySubject.toxml('utf-8'),
    }
    return self.POST(['accounts', 'map'], fields=fields, headers=vendorSpecific)
693,186
See Also: mapIdentityResponse() Args: primarySubject: secondarySubject: vendorSpecific: Returns:
def mapIdentity(self, primarySubject, secondarySubject, vendorSpecific=None):
    """See Also: mapIdentityResponse()

    Args:
        primarySubject: Subject PyXB object.
        secondarySubject: Subject PyXB object.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    resp = self.mapIdentityResponse(primarySubject, secondarySubject, vendorSpecific)
    return self._read_boolean_response(resp)
693,187
See Also: removeMapIdentityResponse() Args: subject: vendorSpecific: Returns:
def removeMapIdentity(self, subject, vendorSpecific=None):
    """See Also: removeMapIdentityResponse()

    Args:
        subject: subject whose identity mapping is removed.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.removeMapIdentityResponse(subject, vendorSpecific)
    )
693,188
See Also: denyMapIdentityResponse() Args: subject: vendorSpecific: Returns:
def denyMapIdentity(self, subject, vendorSpecific=None):
    """See Also: denyMapIdentityResponse()

    Args:
        subject: subject whose identity mapping request is denied.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.denyMapIdentityResponse(subject, vendorSpecific)
    )
693,189
CNIdentity.requestMapIdentity(session, subject) → boolean https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.requestMapIdentity. Args: subject: vendorSpecific: Returns:
def requestMapIdentityResponse(self, subject, vendorSpecific=None):
    """CNIdentity.requestMapIdentity(session, subject) → boolean.

    Args:
        subject: Subject PyXB object; serialized to XML here.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the POST to /accounts.
    """
    return self.POST(
        'accounts', fields={'subject': subject.toxml('utf-8')}, headers=vendorSpecific
    )
693,190
See Also: requestMapIdentityResponse() Args: subject: vendorSpecific: Returns:
def requestMapIdentity(self, subject, vendorSpecific=None):
    """See Also: requestMapIdentityResponse()

    Args:
        subject: Subject PyXB object.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.requestMapIdentityResponse(subject, vendorSpecific)
    )
693,191
See Also: confirmMapIdentityResponse() Args: subject: vendorSpecific: Returns:
def confirmMapIdentity(self, subject, vendorSpecific=None):
    """See Also: confirmMapIdentityResponse()

    Args:
        subject: subject whose identity mapping is confirmed.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        bool: ``True`` on success.
    """
    return self._read_boolean_response(
        self.confirmMapIdentityResponse(subject, vendorSpecific)
    )
693,192
CNIdentity.createGroup(session, groupName) → Subject https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.createGroup. Args: group: vendorSpecific: Returns:
def createGroupResponse(self, group, vendorSpecific=None):
    """CNIdentity.createGroup(session, groupName) → Subject.

    Args:
        group: Group PyXB object; serialized to XML here.
        vendorSpecific: dict of vendor-specific request headers, or None.

    Returns:
        Raw response from the POST to /groups.
    """
    group_doc = ('group.xml', group.toxml('utf-8'))
    return self.POST('groups', fields={'group': group_doc}, headers=vendorSpecific)
693,193