Dataset schema: each column, its type, and the min–max range reported for its values (string columns report length ranges). Records follow in this field order.

id                int32    values 0 – 252k
repo              string   lengths 7 – 55
path              string   lengths 4 – 127
func_name         string   lengths 1 – 88
original_string   string   lengths 75 – 19.8k
language          string   1 distinct value
code              string   lengths 51 – 19.8k
code_tokens       list
docstring         string   lengths 3 – 17.3k
docstring_tokens  list
sha               string   lengths 40 – 40
url               string   lengths 87 – 242
12,200
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
get_namespace_keys
def get_namespace_keys(app, limit):
  """Get namespace keys."""
  ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
  return list(ns_query.Run(limit=limit, batch_size=limit))
python
def get_namespace_keys(app, limit):
  ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
  return list(ns_query.Run(limit=limit, batch_size=limit))
[ "def", "get_namespace_keys", "(", "app", ",", "limit", ")", ":", "ns_query", "=", "datastore", ".", "Query", "(", "'__namespace__'", ",", "keys_only", "=", "True", ",", "_app", "=", "app", ")", "return", "list", "(", "ns_query", ".", "Run", "(", "limit",...
Get namespace keys.
[ "Get", "namespace", "keys", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L457-L460
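For context, a hedged usage sketch of the record above (assumes the legacy App Engine Python 2 runtime, where google.appengine.api.datastore and the __namespace__ metadata kind exist; passing None for app queries the current application):

from google.appengine.api import datastore

# Fetch up to 10 namespace keys; the default namespace has an unnamed key.
keys = get_namespace_keys(None, 10)
names = [key.name() or '' for key in keys]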
12,201
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
NamespaceRange.split_range
def split_range(self):
  """Splits the NamespaceRange into two nearly equal-sized ranges.

  Returns:
    If this NamespaceRange contains a single namespace then a list containing
    this NamespaceRange is returned. Otherwise a two-element list containing
    two NamespaceRanges whose total range is identical to this
    NamespaceRange's is returned.
  """
  if self.is_single_namespace:
    return [self]

  mid_point = (_namespace_to_ord(self.namespace_start) +
               _namespace_to_ord(self.namespace_end)) // 2

  return [NamespaceRange(self.namespace_start,
                         _ord_to_namespace(mid_point),
                         _app=self.app),
          NamespaceRange(_ord_to_namespace(mid_point+1),
                         self.namespace_end,
                         _app=self.app)]
python
def split_range(self):
  if self.is_single_namespace:
    return [self]

  mid_point = (_namespace_to_ord(self.namespace_start) +
               _namespace_to_ord(self.namespace_end)) // 2

  return [NamespaceRange(self.namespace_start,
                         _ord_to_namespace(mid_point),
                         _app=self.app),
          NamespaceRange(_ord_to_namespace(mid_point+1),
                         self.namespace_end,
                         _app=self.app)]
[ "def", "split_range", "(", "self", ")", ":", "if", "self", ".", "is_single_namespace", ":", "return", "[", "self", "]", "mid_point", "=", "(", "_namespace_to_ord", "(", "self", ".", "namespace_start", ")", "+", "_namespace_to_ord", "(", "self", ".", "namespa...
Splits the NamespaceRange into two nearly equal-sized ranges.

Returns:
  If this NamespaceRange contains a single namespace then a list containing
  this NamespaceRange is returned. Otherwise a two-element list containing
  two NamespaceRanges whose total range is identical to this NamespaceRange's
  is returned.
[ "Splits", "the", "NamespaceRange", "into", "two", "nearly", "equal", "-", "sized", "ranges", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L225-L245
12,202
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
NamespaceRange.with_start_after
def with_start_after(self, after_namespace):
  """Returns a copy of this NamespaceRange with a new namespace_start.

  Args:
    after_namespace: A namespace string.

  Returns:
    A NamespaceRange object whose namespace_start is the lexicographically
    next namespace after the given namespace string.

  Raises:
    ValueError: if the NamespaceRange includes only a single namespace.
  """
  namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
  return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)
python
def with_start_after(self, after_namespace):
  namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
  return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)
[ "def", "with_start_after", "(", "self", ",", "after_namespace", ")", ":", "namespace_start", "=", "_ord_to_namespace", "(", "_namespace_to_ord", "(", "after_namespace", ")", "+", "1", ")", "return", "NamespaceRange", "(", "namespace_start", ",", "self", ".", "name...
Returns a copy of this NamespaceRange with a new namespace_start.

Args:
  after_namespace: A namespace string.

Returns:
  A NamespaceRange object whose namespace_start is the lexicographically next
  namespace after the given namespace string.

Raises:
  ValueError: if the NamespaceRange includes only a single namespace.
[ "Returns", "a", "copy", "of", "this", "NamespaceName", "with", "a", "new", "namespace_start", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L267-L281
12,203
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
NamespaceRange.make_datastore_query
def make_datastore_query(self, cursor=None):
  """Returns a datastore.Query that generates all namespaces in the range.

  Args:
    cursor: start cursor for the query.

  Returns:
    A datastore.Query instance that generates db.Keys for each namespace in
    the NamespaceRange.
  """
  filters = {}
  filters['__key__ >= '] = _key_for_namespace(
      self.namespace_start, self.app)
  filters['__key__ <= '] = _key_for_namespace(
      self.namespace_end, self.app)

  return datastore.Query('__namespace__',
                         filters=filters,
                         keys_only=True,
                         cursor=cursor,
                         _app=self.app)
python
def make_datastore_query(self, cursor=None):
  filters = {}
  filters['__key__ >= '] = _key_for_namespace(
      self.namespace_start, self.app)
  filters['__key__ <= '] = _key_for_namespace(
      self.namespace_end, self.app)

  return datastore.Query('__namespace__',
                         filters=filters,
                         keys_only=True,
                         cursor=cursor,
                         _app=self.app)
[ "def", "make_datastore_query", "(", "self", ",", "cursor", "=", "None", ")", ":", "filters", "=", "{", "}", "filters", "[", "'__key__ >= '", "]", "=", "_key_for_namespace", "(", "self", ".", "namespace_start", ",", "self", ".", "app", ")", "filters", "[", ...
Returns a datastore.Query that generates all namespaces in the range.

Args:
  cursor: start cursor for the query.

Returns:
  A datastore.Query instance that generates db.Keys for each namespace in the
  NamespaceRange.
[ "Returns", "a", "datastore", ".", "Query", "that", "generates", "all", "namespaces", "in", "the", "range", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L283-L303
12,204
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
NamespaceRange.normalized_start
def normalized_start(self):
  """Returns a NamespaceRange with leading non-existent namespaces removed.

  Returns:
    A copy of this NamespaceRange whose namespace_start is adjusted to
    exclude the portion of the range that contains no actual namespaces in
    the datastore. None is returned if the NamespaceRange contains no actual
    namespaces in the datastore.
  """
  namespaces_after_key = list(self.make_datastore_query().Run(limit=1))

  if not namespaces_after_key:
    return None

  namespace_after_key = namespaces_after_key[0].name() or ''
  return NamespaceRange(namespace_after_key,
                        self.namespace_end,
                        _app=self.app)
python
def normalized_start(self):
  namespaces_after_key = list(self.make_datastore_query().Run(limit=1))

  if not namespaces_after_key:
    return None

  namespace_after_key = namespaces_after_key[0].name() or ''
  return NamespaceRange(namespace_after_key,
                        self.namespace_end,
                        _app=self.app)
[ "def", "normalized_start", "(", "self", ")", ":", "namespaces_after_key", "=", "list", "(", "self", ".", "make_datastore_query", "(", ")", ".", "Run", "(", "limit", "=", "1", ")", ")", "if", "not", "namespaces_after_key", ":", "return", "None", "namespace_af...
Returns a NamespaceRange with leading non-existent namespaces removed.

Returns:
  A copy of this NamespaceRange whose namespace_start is adjusted to exclude
  the portion of the range that contains no actual namespaces in the
  datastore. None is returned if the NamespaceRange contains no actual
  namespaces in the datastore.
[ "Returns", "a", "NamespaceRange", "with", "leading", "non", "-", "existant", "namespaces", "removed", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L305-L322
12,205
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
NamespaceRange.to_json_object
def to_json_object(self):
  """Returns a dict representation that can be serialized to JSON."""
  obj_dict = dict(namespace_start=self.namespace_start,
                  namespace_end=self.namespace_end)
  if self.app is not None:
    obj_dict['app'] = self.app
  return obj_dict
python
def to_json_object(self):
  obj_dict = dict(namespace_start=self.namespace_start,
                  namespace_end=self.namespace_end)
  if self.app is not None:
    obj_dict['app'] = self.app
  return obj_dict
[ "def", "to_json_object", "(", "self", ")", ":", "obj_dict", "=", "dict", "(", "namespace_start", "=", "self", ".", "namespace_start", ",", "namespace_end", "=", "self", ".", "namespace_end", ")", "if", "self", ".", "app", "is", "not", "None", ":", "obj_dic...
Returns a dict representation that can be serialized to JSON.
[ "Returns", "a", "dict", "representation", "that", "can", "be", "serialized", "to", "JSON", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L324-L330
12,206
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
NamespaceRange.split
def split(cls, n, contiguous,
          can_query=itertools.chain(itertools.repeat(True, 50),
                                    itertools.repeat(False)).next,
          _app=None):
  # pylint: disable=g-doc-args
  """Splits the complete NamespaceRange into n equally-sized NamespaceRanges.

  Args:
    n: The maximum number of NamespaceRanges to return. Fewer than n
      namespaces may be returned.
    contiguous: If True then the returned NamespaceRanges will cover the
      entire space of possible namespaces (i.e. from MIN_NAMESPACE to
      MAX_NAMESPACE) without gaps. If False then the returned NamespaceRanges
      may exclude namespaces that don't appear in the datastore.
    can_query: A function that returns True if split() can query the
      datastore to generate more fair namespace range splits, and False
      otherwise. If not set then split() is allowed to make 50 datastore
      queries.

  Returns:
    A list of at most n NamespaceRanges representing a near-equal
    distribution of actual existent datastore namespaces. The returned list
    will be sorted lexicographically.

  Raises:
    ValueError: if n is < 1.
  """
  if n < 1:
    raise ValueError('n must be >= 1')

  ranges = None
  if can_query():
    if not contiguous:
      ns_keys = get_namespace_keys(_app, n + 1)
      if not ns_keys:
        return []
      else:
        if len(ns_keys) <= n:
          # If there are fewer actual namespaces than the number of
          # NamespaceRanges to return, then just return the list of those
          # namespaces.
          ns_range = []
          for ns_key in ns_keys:
            ns_range.append(NamespaceRange(ns_key.name() or '',
                                           ns_key.name() or '',
                                           _app=_app))
          return sorted(ns_range,
                        key=lambda ns_range: ns_range.namespace_start)
        # Use the first key and save the initial normalized_start() call.
        ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
    else:
      ns_range = NamespaceRange(_app=_app).normalized_start()
      if ns_range is None:
        return [NamespaceRange(_app=_app)]
      ranges = [ns_range]
  else:
    ranges = [NamespaceRange(_app=_app)]

  singles = []
  while ranges and (len(ranges) + len(singles)) < n:
    namespace_range = ranges.pop(0)
    if namespace_range.is_single_namespace:
      singles.append(namespace_range)
    else:
      left, right = namespace_range.split_range()
      if can_query():
        right = right.normalized_start()
      if right is not None:
        ranges.append(right)
      ranges.append(left)
  ns_ranges = sorted(singles + ranges,
                     key=lambda ns_range: ns_range.namespace_start)

  if contiguous:
    if not ns_ranges:
      # This condition is possible if every namespace was deleted after the
      # first call to ns_range.normalized_start().
      return [NamespaceRange(_app=_app)]

    continuous_ns_ranges = []
    for i in range(len(ns_ranges)):
      if i == 0:
        namespace_start = MIN_NAMESPACE
      else:
        namespace_start = ns_ranges[i].namespace_start
      if i == len(ns_ranges) - 1:
        namespace_end = MAX_NAMESPACE
      else:
        namespace_end = _ord_to_namespace(
            _namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)
      continuous_ns_ranges.append(NamespaceRange(namespace_start,
                                                 namespace_end,
                                                 _app=_app))
    return continuous_ns_ranges
  else:
    return ns_ranges
python
def split(cls, n, contiguous,
          can_query=itertools.chain(itertools.repeat(True, 50),
                                    itertools.repeat(False)).next,
          _app=None):
  # pylint: disable=g-doc-args
  if n < 1:
    raise ValueError('n must be >= 1')

  ranges = None
  if can_query():
    if not contiguous:
      ns_keys = get_namespace_keys(_app, n + 1)
      if not ns_keys:
        return []
      else:
        if len(ns_keys) <= n:
          # If there are fewer actual namespaces than the number of
          # NamespaceRanges to return, then just return the list of those
          # namespaces.
          ns_range = []
          for ns_key in ns_keys:
            ns_range.append(NamespaceRange(ns_key.name() or '',
                                           ns_key.name() or '',
                                           _app=_app))
          return sorted(ns_range,
                        key=lambda ns_range: ns_range.namespace_start)
        # Use the first key and save the initial normalized_start() call.
        ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
    else:
      ns_range = NamespaceRange(_app=_app).normalized_start()
      if ns_range is None:
        return [NamespaceRange(_app=_app)]
      ranges = [ns_range]
  else:
    ranges = [NamespaceRange(_app=_app)]

  singles = []
  while ranges and (len(ranges) + len(singles)) < n:
    namespace_range = ranges.pop(0)
    if namespace_range.is_single_namespace:
      singles.append(namespace_range)
    else:
      left, right = namespace_range.split_range()
      if can_query():
        right = right.normalized_start()
      if right is not None:
        ranges.append(right)
      ranges.append(left)
  ns_ranges = sorted(singles + ranges,
                     key=lambda ns_range: ns_range.namespace_start)

  if contiguous:
    if not ns_ranges:
      # This condition is possible if every namespace was deleted after the
      # first call to ns_range.normalized_start().
      return [NamespaceRange(_app=_app)]

    continuous_ns_ranges = []
    for i in range(len(ns_ranges)):
      if i == 0:
        namespace_start = MIN_NAMESPACE
      else:
        namespace_start = ns_ranges[i].namespace_start
      if i == len(ns_ranges) - 1:
        namespace_end = MAX_NAMESPACE
      else:
        namespace_end = _ord_to_namespace(
            _namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)
      continuous_ns_ranges.append(NamespaceRange(namespace_start,
                                                 namespace_end,
                                                 _app=_app))
    return continuous_ns_ranges
  else:
    return ns_ranges
[ "def", "split", "(", "cls", ",", "n", ",", "contiguous", ",", "can_query", "=", "itertools", ".", "chain", "(", "itertools", ".", "repeat", "(", "True", ",", "50", ")", ",", "itertools", ".", "repeat", "(", "False", ")", ")", ".", "next", ",", "_ap...
Splits the complete NamespaceRange into n equally-sized NamespaceRanges.

Args:
  n: The maximum number of NamespaceRanges to return. Fewer than n namespaces
    may be returned.
  contiguous: If True then the returned NamespaceRanges will cover the entire
    space of possible namespaces (i.e. from MIN_NAMESPACE to MAX_NAMESPACE)
    without gaps. If False then the returned NamespaceRanges may exclude
    namespaces that don't appear in the datastore.
  can_query: A function that returns True if split() can query the datastore
    to generate more fair namespace range splits, and False otherwise. If not
    set then split() is allowed to make 50 datastore queries.

Returns:
  A list of at most n NamespaceRanges representing a near-equal distribution
  of actual existent datastore namespaces. The returned list will be sorted
  lexicographically.

Raises:
  ValueError: if n is < 1.
[ "Splits", "the", "complete", "NamespaceRange", "into", "n", "equally", "-", "sized", "NamespaceRanges", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L343-L441
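The can_query default in the signature above is a compact query-budget idiom: a bound .next method on an iterator that yields True 50 times and then False forever, so each call consumes one unit of budget. A minimal standalone sketch of the same trick; functools.partial(next, ...) is the Python 3 spelling of Python 2's bound .next:

import itertools
from functools import partial

# A callable that returns True for the first 50 calls, then False forever.
can_query = partial(next, itertools.chain(itertools.repeat(True, 50),
                                          itertools.repeat(False)))

assert all(can_query() for _ in range(50))
assert can_query() is False  # budget exhausted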
12,207
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_RecordsPoolBase.append
def append(self, data):
  """Append data to a file."""
  data_length = len(data)
  if self._size + data_length > self._flush_size:
    self.flush()

  if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE:
    raise errors.Error(
        "Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE))
  else:
    self._buffer.append(data)
    self._size += data_length

  if self._size > self._flush_size:
    self.flush()
python
def append(self, data):
  data_length = len(data)
  if self._size + data_length > self._flush_size:
    self.flush()

  if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE:
    raise errors.Error(
        "Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE))
  else:
    self._buffer.append(data)
    self._size += data_length

  if self._size > self._flush_size:
    self.flush()
[ "def", "append", "(", "self", ",", "data", ")", ":", "data_length", "=", "len", "(", "data", ")", "if", "self", ".", "_size", "+", "data_length", ">", "self", ".", "_flush_size", ":", "self", ".", "flush", "(", ")", "if", "not", "self", ".", "_excl...
Append data to a file.
[ "Append", "data", "to", "a", "file", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L357-L371
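append() above uses a two-checkpoint buffering pattern: flush before buffering if the new item would overflow the pool, then flush again if a single oversized (exclusive-mode) item pushed it past the limit anyway. A minimal sketch of the pattern with a hypothetical class, not the mapreduce one:

class BufferedPool(object):
  """Illustrative pool that flushes around a soft size limit."""

  def __init__(self, flush_size):
    self._flush_size = flush_size
    self._buffer = []
    self._size = 0

  def append(self, data):
    if self._size + len(data) > self._flush_size:
      self.flush()  # make room before buffering
    self._buffer.append(data)
    self._size += len(data)
    if self._size > self._flush_size:
      self.flush()  # a single large item can still overflow the pool

  def flush(self):
    # A real pool would write self._buffer to its backing file here.
    self._buffer = []
    self._size = 0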
12,208
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
GCSRecordsPool._write
def _write(self, str_buf):
  """Uses the filehandle to the file in GCS to write to it."""
  self._filehandle.write(str_buf)
  self._buf_size += len(str_buf)
python
def _write(self, str_buf):
  self._filehandle.write(str_buf)
  self._buf_size += len(str_buf)
[ "def", "_write", "(", "self", ",", "str_buf", ")", ":", "self", ".", "_filehandle", ".", "write", "(", "str_buf", ")", "self", ".", "_buf_size", "+=", "len", "(", "str_buf", ")" ]
Uses the filehandle to the file in GCS to write to it.
[ "Uses", "the", "filehandle", "to", "the", "file", "in", "GCS", "to", "write", "to", "it", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L432-L435
12,209
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_GoogleCloudStorageBase._get_tmp_gcs_bucket
def _get_tmp_gcs_bucket(cls, writer_spec):
  """Returns bucket used for writing tmp files."""
  if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
    return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
  return cls._get_gcs_bucket(writer_spec)
python
def _get_tmp_gcs_bucket(cls, writer_spec):
  if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
    return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
  return cls._get_gcs_bucket(writer_spec)
[ "def", "_get_tmp_gcs_bucket", "(", "cls", ",", "writer_spec", ")", ":", "if", "cls", ".", "TMP_BUCKET_NAME_PARAM", "in", "writer_spec", ":", "return", "writer_spec", "[", "cls", ".", "TMP_BUCKET_NAME_PARAM", "]", "return", "cls", ".", "_get_gcs_bucket", "(", "wr...
Returns bucket used for writing tmp files.
[ "Returns", "bucket", "used", "for", "writing", "tmp", "files", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L497-L501
12,210
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_GoogleCloudStorageBase._get_tmp_account_id
def _get_tmp_account_id(cls, writer_spec):
  """Returns the account id to use with tmp bucket."""
  # pick tmp id iff tmp bucket is set explicitly
  if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
    return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
  return cls._get_account_id(writer_spec)
python
def _get_tmp_account_id(cls, writer_spec):
  # pick tmp id iff tmp bucket is set explicitly
  if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
    return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
  return cls._get_account_id(writer_spec)
[ "def", "_get_tmp_account_id", "(", "cls", ",", "writer_spec", ")", ":", "# pick tmp id iff tmp bucket is set explicitly", "if", "cls", ".", "TMP_BUCKET_NAME_PARAM", "in", "writer_spec", ":", "return", "writer_spec", ".", "get", "(", "cls", ".", "_TMP_ACCOUNT_ID_PARAM", ...
Returns the account id to use with tmp bucket.
[ "Returns", "the", "account", "id", "to", "use", "with", "tmp", "bucket", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L504-L509
12,211
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_GoogleCloudStorageOutputWriterBase._generate_filename
def _generate_filename(cls, writer_spec, name, job_id, num,
                       attempt=None, seg_index=None):
  """Generates a filename for a particular output.

  Args:
    writer_spec: specification dictionary for the output writer.
    name: name of the job.
    job_id: the ID number assigned to the job.
    num: shard number.
    attempt: the shard attempt number.
    seg_index: index of the seg. None means the final output.

  Returns:
    a string containing the filename.

  Raises:
    BadWriterParamsError: if the template contains any errors such as invalid
      syntax or contains unknown substitution placeholders.
  """
  naming_format = cls._TMP_FILE_NAMING_FORMAT
  if seg_index is None:
    naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
                                    cls._DEFAULT_NAMING_FORMAT)

  template = string.Template(naming_format)
  try:
    # Check that template doesn't use undefined mappings and is formatted well
    if seg_index is None:
      return template.substitute(name=name, id=job_id, num=num)
    else:
      return template.substitute(name=name, id=job_id, num=num,
                                 attempt=attempt,
                                 seg=seg_index)
  except ValueError, error:
    raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
  except KeyError, error:
    raise errors.BadWriterParamsError("Naming template '%s' has extra "
                                      "mappings, %s" % (naming_format, error))
python
def _generate_filename(cls, writer_spec, name, job_id, num,
                       attempt=None, seg_index=None):
  naming_format = cls._TMP_FILE_NAMING_FORMAT
  if seg_index is None:
    naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
                                    cls._DEFAULT_NAMING_FORMAT)

  template = string.Template(naming_format)
  try:
    # Check that template doesn't use undefined mappings and is formatted well
    if seg_index is None:
      return template.substitute(name=name, id=job_id, num=num)
    else:
      return template.substitute(name=name, id=job_id, num=num,
                                 attempt=attempt,
                                 seg=seg_index)
  except ValueError, error:
    raise errors.BadWriterParamsError("Naming template is bad, %s" % (error))
  except KeyError, error:
    raise errors.BadWriterParamsError("Naming template '%s' has extra "
                                      "mappings, %s" % (naming_format, error))
[ "def", "_generate_filename", "(", "cls", ",", "writer_spec", ",", "name", ",", "job_id", ",", "num", ",", "attempt", "=", "None", ",", "seg_index", "=", "None", ")", ":", "naming_format", "=", "cls", ".", "_TMP_FILE_NAMING_FORMAT", "if", "seg_index", "is", ...
Generates a filename for a particular output.

Args:
  writer_spec: specification dictionary for the output writer.
  name: name of the job.
  job_id: the ID number assigned to the job.
  num: shard number.
  attempt: the shard attempt number.
  seg_index: index of the seg. None means the final output.

Returns:
  a string containing the filename.

Raises:
  BadWriterParamsError: if the template contains any errors such as invalid
    syntax or contains unknown substitution placeholders.
[ "Generates", "a", "filename", "for", "a", "particular", "output", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L536-L573
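The try/except above leans on string.Template.substitute(), which raises ValueError for malformed templates and KeyError for placeholders that were given no mapping. A small standalone check of that behaviour (the template strings here are made up for illustration):

import string

template = string.Template("$name/$id/output-$num")
print(template.substitute(name="wordcount", id="42", num=0))
# -> wordcount/42/output-0

try:
  string.Template("$name-$seg").substitute(name="wordcount")
except KeyError as error:
  print("unknown placeholder: %s" % error)  # 'seg' has no mapping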
12,212
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_GoogleCloudStorageOutputWriterBase._open_file
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
  """Opens a new gcs file for writing."""
  if use_tmp_bucket:
    bucket = cls._get_tmp_gcs_bucket(writer_spec)
    account_id = cls._get_tmp_account_id(writer_spec)
  else:
    bucket = cls._get_gcs_bucket(writer_spec)
    account_id = cls._get_account_id(writer_spec)

  # GoogleCloudStorage format for filenames, Initial slash is required
  filename = "/%s/%s" % (bucket, filename_suffix)

  content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)

  options = {}
  if cls.ACL_PARAM in writer_spec:
    options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)

  return cloudstorage.open(filename, mode="w", content_type=content_type,
                           options=options, _account_id=account_id)
python
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
  if use_tmp_bucket:
    bucket = cls._get_tmp_gcs_bucket(writer_spec)
    account_id = cls._get_tmp_account_id(writer_spec)
  else:
    bucket = cls._get_gcs_bucket(writer_spec)
    account_id = cls._get_account_id(writer_spec)

  # GoogleCloudStorage format for filenames, Initial slash is required
  filename = "/%s/%s" % (bucket, filename_suffix)

  content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)

  options = {}
  if cls.ACL_PARAM in writer_spec:
    options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)

  return cloudstorage.open(filename, mode="w", content_type=content_type,
                           options=options, _account_id=account_id)
[ "def", "_open_file", "(", "cls", ",", "writer_spec", ",", "filename_suffix", ",", "use_tmp_bucket", "=", "False", ")", ":", "if", "use_tmp_bucket", ":", "bucket", "=", "cls", ".", "_get_tmp_gcs_bucket", "(", "writer_spec", ")", "account_id", "=", "cls", ".", ...
Opens a new gcs file for writing.
[ "Opens", "a", "new", "gcs", "file", "for", "writing", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L614-L633
12,213
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_GoogleCloudStorageOutputWriterBase.write
def write(self, data):
  """Write data to the GoogleCloudStorage file.

  Args:
    data: string containing the data to be written.
  """
  start_time = time.time()
  self._get_write_buffer().write(data)
  ctx = context.get()
  operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)
  operation.counters.Increment(
      COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)
python
def write(self, data):
  start_time = time.time()
  self._get_write_buffer().write(data)
  ctx = context.get()
  operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)
  operation.counters.Increment(
      COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)
[ "def", "write", "(", "self", ",", "data", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "_get_write_buffer", "(", ")", ".", "write", "(", "data", ")", "ctx", "=", "context", ".", "get", "(", ")", "operation", ".", "counte...
Write data to the GoogleCloudStorage file.

Args:
  data: string containing the data to be written.
[ "Write", "data", "to", "the", "GoogleCloudStorage", "file", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L651-L662
12,214
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
_GoogleCloudStorageOutputWriter._create
def _create(cls, writer_spec, filename_suffix):
  """Helper method that actually creates the file in cloud storage."""
  writer = cls._open_file(writer_spec, filename_suffix)
  return cls(writer, writer_spec=writer_spec)
python
def _create(cls, writer_spec, filename_suffix):
  writer = cls._open_file(writer_spec, filename_suffix)
  return cls(writer, writer_spec=writer_spec)
[ "def", "_create", "(", "cls", ",", "writer_spec", ",", "filename_suffix", ")", ":", "writer", "=", "cls", ".", "_open_file", "(", "writer_spec", ",", "filename_suffix", ")", "return", "cls", "(", "writer", ",", "writer_spec", "=", "writer_spec", ")" ]
Helper method that actually creates the file in cloud storage.
[ "Helper", "method", "that", "actually", "creates", "the", "file", "in", "cloud", "storage", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L744-L747
12,215
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
GoogleCloudStorageConsistentOutputWriter._create_tmpfile
def _create_tmpfile(cls, status):
  """Creates a new random-named tmpfile."""
  # We can't put the tmpfile in the same directory as the output. There are
  # rare circumstances when we leave trash behind and we don't want this
  # trash to be loaded into bigquery and/or used for restore.
  #
  # We use the mapreduce id, shard number and attempt plus 128 random bits
  # to make collisions virtually impossible.
  tmpl = string.Template(cls._TMPFILE_PATTERN)
  filename = tmpl.substitute(
      id=status.mapreduce_id, shard=status.shard,
      random=random.getrandbits(cls._RAND_BITS))
  return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True)
python
def _create_tmpfile(cls, status):
  # We can't put the tmpfile in the same directory as the output. There are
  # rare circumstances when we leave trash behind and we don't want this
  # trash to be loaded into bigquery and/or used for restore.
  #
  # We use the mapreduce id, shard number and attempt plus 128 random bits
  # to make collisions virtually impossible.
  tmpl = string.Template(cls._TMPFILE_PATTERN)
  filename = tmpl.substitute(
      id=status.mapreduce_id, shard=status.shard,
      random=random.getrandbits(cls._RAND_BITS))
  return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True)
[ "def", "_create_tmpfile", "(", "cls", ",", "status", ")", ":", "# We can't put the tmpfile in the same directory as the output. There are", "# rare circumstances when we leave trash behind and we don't want this trash", "# to be loaded into bigquery and/or used for restore.", "#", "# We used...
Creates a new random-named tmpfile.
[ "Creates", "a", "new", "random", "-", "named", "tmpfile", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L955-L969
12,216
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
GoogleCloudStorageConsistentOutputWriter._try_to_clean_garbage
def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
  """Tries to remove any files created by this shard that aren't needed.

  Args:
    writer_spec: writer_spec for the MR.
    exclude_list: A list of filenames (strings) that should not be removed.
  """
  # Try to remove garbage (if any). Note that listbucket is not strongly
  # consistent so something might survive.
  tmpl = string.Template(self._TMPFILE_PREFIX)
  prefix = tmpl.substitute(
      id=self.status.mapreduce_id, shard=self.status.shard)
  bucket = self._get_tmp_gcs_bucket(writer_spec)
  account_id = self._get_tmp_account_id(writer_spec)
  for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix),
                                   _account_id=account_id):
    if f.filename not in exclude_list:
      self._remove_tmpfile(f.filename, self.status.writer_spec)
python
def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
  # Try to remove garbage (if any). Note that listbucket is not strongly
  # consistent so something might survive.
  tmpl = string.Template(self._TMPFILE_PREFIX)
  prefix = tmpl.substitute(
      id=self.status.mapreduce_id, shard=self.status.shard)
  bucket = self._get_tmp_gcs_bucket(writer_spec)
  account_id = self._get_tmp_account_id(writer_spec)
  for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix),
                                   _account_id=account_id):
    if f.filename not in exclude_list:
      self._remove_tmpfile(f.filename, self.status.writer_spec)
[ "def", "_try_to_clean_garbage", "(", "self", ",", "writer_spec", ",", "exclude_list", "=", "(", ")", ")", ":", "# Try to remove garbage (if any). Note that listbucket is not strongly", "# consistent so something might survive.", "tmpl", "=", "string", ".", "Template", "(", ...
Tries to remove any files created by this shard that aren't needed.

Args:
  writer_spec: writer_spec for the MR.
  exclude_list: A list of filenames (strings) that should not be removed.
[ "Tries", "to", "remove", "any", "files", "created", "by", "this", "shard", "that", "aren", "t", "needed", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L1014-L1032
12,217
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/property_range.py
_get_weights
def _get_weights(max_length):
  """Get weights for each offset in str of certain max length.

  Args:
    max_length: max length of the strings.

  Returns:
    A list of ints as weights.

  Example:
    If max_length is 2 and alphabet is "ab", then we have order "", "a",
    "aa", "ab", "b", "ba", "bb". So the weight for the first char is 3.
  """
  weights = [1]
  for i in range(1, max_length):
    weights.append(weights[i-1] * len(_ALPHABET) + 1)
  weights.reverse()
  return weights
python
def _get_weights(max_length):
  weights = [1]
  for i in range(1, max_length):
    weights.append(weights[i-1] * len(_ALPHABET) + 1)
  weights.reverse()
  return weights
[ "def", "_get_weights", "(", "max_length", ")", ":", "weights", "=", "[", "1", "]", "for", "i", "in", "range", "(", "1", ",", "max_length", ")", ":", "weights", ".", "append", "(", "weights", "[", "i", "-", "1", "]", "*", "len", "(", "_ALPHABET", ...
Get weights for each offset in str of certain max length.

Args:
  max_length: max length of the strings.

Returns:
  A list of ints as weights.

Example:
  If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
  "ab", "b", "ba", "bb". So the weight for the first char is 3.
[ "Get", "weights", "for", "each", "offset", "in", "str", "of", "certain", "max", "length", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L355-L372
12,218
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/property_range.py
_str_to_ord
def _str_to_ord(content, weights):
  """Converts a string to its lexicographical order.

  Args:
    content: the string to convert. Of type str.
    weights: weights from _get_weights.

  Returns:
    an int or long that represents the order of this string. "" has order 0.
  """
  ordinal = 0
  for i, c in enumerate(content):
    ordinal += weights[i] * _ALPHABET.index(c) + 1
  return ordinal
python
def _str_to_ord(content, weights):
  ordinal = 0
  for i, c in enumerate(content):
    ordinal += weights[i] * _ALPHABET.index(c) + 1
  return ordinal
[ "def", "_str_to_ord", "(", "content", ",", "weights", ")", ":", "ordinal", "=", "0", "for", "i", ",", "c", "in", "enumerate", "(", "content", ")", ":", "ordinal", "+=", "weights", "[", "i", "]", "*", "_ALPHABET", ".", "index", "(", "c", ")", "+", ...
Converts a string to its lexicographical order.

Args:
  content: the string to convert. Of type str.
  weights: weights from _get_weights.

Returns:
  an int or long that represents the order of this string. "" has order 0.
[ "Converts", "a", "string", "to", "its", "lexicographical", "order", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L375-L388
12,219
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/property_range.py
_ord_to_str
def _ord_to_str(ordinal, weights):
  """Reverse function of _str_to_ord."""
  chars = []
  for weight in weights:
    if ordinal == 0:
      return "".join(chars)
    ordinal -= 1
    index, ordinal = divmod(ordinal, weight)
    chars.append(_ALPHABET[index])
  return "".join(chars)
python
def _ord_to_str(ordinal, weights):
  chars = []
  for weight in weights:
    if ordinal == 0:
      return "".join(chars)
    ordinal -= 1
    index, ordinal = divmod(ordinal, weight)
    chars.append(_ALPHABET[index])
  return "".join(chars)
[ "def", "_ord_to_str", "(", "ordinal", ",", "weights", ")", ":", "chars", "=", "[", "]", "for", "weight", "in", "weights", ":", "if", "ordinal", "==", "0", ":", "return", "\"\"", ".", "join", "(", "chars", ")", "ordinal", "-=", "1", "index", ",", "o...
Reverse function of _str_to_ord.
[ "Reverse", "function", "of", "_str_to_ord", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L391-L400
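Taken together, _get_weights, _str_to_ord and _ord_to_str define a bijection between strings and their lexicographic rank. A self-contained round-trip check based on the _get_weights docstring's example (alphabet "ab", max length 2, expected order "", "a", "aa", "ab", "b", "ba", "bb"; _ALPHABET here is a stand-in for the module's real alphabet):

_ALPHABET = "ab"

def _get_weights(max_length):
  weights = [1]
  for i in range(1, max_length):
    weights.append(weights[i-1] * len(_ALPHABET) + 1)
  weights.reverse()
  return weights

def _str_to_ord(content, weights):
  ordinal = 0
  for i, c in enumerate(content):
    ordinal += weights[i] * _ALPHABET.index(c) + 1
  return ordinal

def _ord_to_str(ordinal, weights):
  chars = []
  for weight in weights:
    if ordinal == 0:
      return "".join(chars)
    ordinal -= 1
    index, ordinal = divmod(ordinal, weight)
    chars.append(_ALPHABET[index])
  return "".join(chars)

weights = _get_weights(2)  # [3, 1]
order = ["", "a", "aa", "ab", "b", "ba", "bb"]
assert [_str_to_ord(s, weights) for s in order] == list(range(7))
assert [_ord_to_str(o, weights) for o in range(7)] == order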
12,220
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/property_range.py
PropertyRange._get_range_from_filters
def _get_range_from_filters(cls, filters, model_class):
  """Get property range from filters user provided.

  This method also validates there is one and only one closed range on a
  single property.

  Args:
    filters: user supplied filters. Each filter should be a list or tuple of
      format (<property_name_as_str>, <query_operator_as_str>,
      <value_of_certain_type>). Value type should satisfy the property's
      type.
    model_class: the model class for the entity type to apply filters on.

  Returns:
    a tuple of (property, start_filter, end_filter). property is the model's
    field that the range is about. start_filter and end_filter define the
    start and the end of the range. (None, None, None) if no range is found.

  Raises:
    BadReaderParamsError: if any filter is invalid in any way.
  """
  if not filters:
    return None, None, None

  range_property = None
  start_val = None
  end_val = None
  start_filter = None
  end_filter = None
  for f in filters:
    prop, op, val = f

    if op in [">", ">=", "<", "<="]:
      if range_property and range_property != prop:
        raise errors.BadReaderParamsError(
            "Range on only one property is supported.")
      range_property = prop

      if val is None:
        raise errors.BadReaderParamsError(
            "Range can't be None in filter %s", f)

      if op in [">", ">="]:
        if start_val is not None:
          raise errors.BadReaderParamsError(
              "Operation %s is specified more than once.", op)
        start_val = val
        start_filter = f
      else:
        if end_val is not None:
          raise errors.BadReaderParamsError(
              "Operation %s is specified more than once.", op)
        end_val = val
        end_filter = f
    elif op != "=":
      raise errors.BadReaderParamsError(
          "Only < <= > >= = are supported as operation. Got %s", op)

  if not range_property:
    return None, None, None

  if start_val is None or end_val is None:
    raise errors.BadReaderParamsError(
        "Filter should contain a complete range on property %s",
        range_property)

  if issubclass(model_class, db.Model):
    property_obj = model_class.properties()[range_property]
  else:
    property_obj = (
        model_class._properties[  # pylint: disable=protected-access
            range_property])

  supported_properties = (
      _DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +
      _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())
  if not isinstance(property_obj, tuple(supported_properties)):
    raise errors.BadReaderParamsError(
        "Filtered property %s is not supported by sharding.", range_property)

  if not start_val < end_val:
    raise errors.BadReaderParamsError(
        "Start value %s should be smaller than end value %s",
        start_val, end_val)

  return property_obj, start_filter, end_filter
python
def _get_range_from_filters(cls, filters, model_class):
  if not filters:
    return None, None, None

  range_property = None
  start_val = None
  end_val = None
  start_filter = None
  end_filter = None
  for f in filters:
    prop, op, val = f

    if op in [">", ">=", "<", "<="]:
      if range_property and range_property != prop:
        raise errors.BadReaderParamsError(
            "Range on only one property is supported.")
      range_property = prop

      if val is None:
        raise errors.BadReaderParamsError(
            "Range can't be None in filter %s", f)

      if op in [">", ">="]:
        if start_val is not None:
          raise errors.BadReaderParamsError(
              "Operation %s is specified more than once.", op)
        start_val = val
        start_filter = f
      else:
        if end_val is not None:
          raise errors.BadReaderParamsError(
              "Operation %s is specified more than once.", op)
        end_val = val
        end_filter = f
    elif op != "=":
      raise errors.BadReaderParamsError(
          "Only < <= > >= = are supported as operation. Got %s", op)

  if not range_property:
    return None, None, None

  if start_val is None or end_val is None:
    raise errors.BadReaderParamsError(
        "Filter should contain a complete range on property %s",
        range_property)

  if issubclass(model_class, db.Model):
    property_obj = model_class.properties()[range_property]
  else:
    property_obj = (
        model_class._properties[  # pylint: disable=protected-access
            range_property])

  supported_properties = (
      _DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +
      _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())
  if not isinstance(property_obj, tuple(supported_properties)):
    raise errors.BadReaderParamsError(
        "Filtered property %s is not supported by sharding.", range_property)

  if not start_val < end_val:
    raise errors.BadReaderParamsError(
        "Start value %s should be smaller than end value %s",
        start_val, end_val)

  return property_obj, start_filter, end_filter
[ "def", "_get_range_from_filters", "(", "cls", ",", "filters", ",", "model_class", ")", ":", "if", "not", "filters", ":", "return", "None", ",", "None", ",", "None", "range_property", "=", "None", "start_val", "=", "None", "end_val", "=", "None", "start_filte...
Get property range from filters user provided.

This method also validates there is one and only one closed range on a single
property.

Args:
  filters: user supplied filters. Each filter should be a list or tuple of
    format (<property_name_as_str>, <query_operator_as_str>,
    <value_of_certain_type>). Value type should satisfy the property's type.
  model_class: the model class for the entity type to apply filters on.

Returns:
  a tuple of (property, start_filter, end_filter). property is the model's
  field that the range is about. start_filter and end_filter define the start
  and the end of the range. (None, None, None) if no range is found.

Raises:
  BadReaderParamsError: if any filter is invalid in any way.
[ "Get", "property", "range", "from", "filters", "user", "provided", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L81-L162
12,221
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/property_range.py
PropertyRange.split
def split(self, n):
  """Evenly split this range into contiguous, non overlapping subranges.

  Args:
    n: number of splits.

  Returns:
    a list of contiguous, non overlapping sub PropertyRanges. May be fewer
    than n when there are not enough subranges.
  """
  new_range_filters = []
  name = self.start[0]
  prop_cls = self.prop.__class__
  if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
    splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
        self.start[2], self.end[2], n,
        self.start[1] == ">=", self.end[1] == "<=")
    start_filter = (name, ">=", splitpoints[0])
    for p in splitpoints[1:]:
      end_filter = (name, "<", p)
      new_range_filters.append([start_filter, end_filter])
      start_filter = (name, ">=", p)
  else:
    splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
        self.start[2], self.end[2], n)
    start_filter = self.start
    for p in splitpoints:
      end_filter = (name, "<", p)
      new_range_filters.append([start_filter, end_filter])
      start_filter = (name, ">=", p)
    new_range_filters.append([start_filter, self.end])

  for f in new_range_filters:
    f.extend(self._equality_filters)

  return [self.__class__(f, self.model_class_path)
          for f in new_range_filters]
python
def split(self, n):
  new_range_filters = []
  name = self.start[0]
  prop_cls = self.prop.__class__
  if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
    splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
        self.start[2], self.end[2], n,
        self.start[1] == ">=", self.end[1] == "<=")
    start_filter = (name, ">=", splitpoints[0])
    for p in splitpoints[1:]:
      end_filter = (name, "<", p)
      new_range_filters.append([start_filter, end_filter])
      start_filter = (name, ">=", p)
  else:
    splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
        self.start[2], self.end[2], n)
    start_filter = self.start
    for p in splitpoints:
      end_filter = (name, "<", p)
      new_range_filters.append([start_filter, end_filter])
      start_filter = (name, ">=", p)
    new_range_filters.append([start_filter, self.end])

  for f in new_range_filters:
    f.extend(self._equality_filters)

  return [self.__class__(f, self.model_class_path)
          for f in new_range_filters]
[ "def", "split", "(", "self", ",", "n", ")", ":", "new_range_filters", "=", "[", "]", "name", "=", "self", ".", "start", "[", "0", "]", "prop_cls", "=", "self", ".", "prop", ".", "__class__", "if", "prop_cls", "in", "_DISCRETE_PROPERTY_SPLIT_FUNCTIONS", "...
Evenly split this range into contiguous, non overlapping subranges.

Args:
  n: number of splits.

Returns:
  a list of contiguous, non overlapping sub PropertyRanges. May be fewer than
  n when there are not enough subranges.
[ "Evenly", "split", "this", "range", "into", "contiguous", "non", "overlapping", "subranges", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L164-L199
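Stripped of the model machinery, the loop in split() turns a sorted list of splitpoints into chained half-open filter pairs. A toy illustration with hypothetical values:

name = "created"  # hypothetical property name
splitpoints = [10, 20, 30]
start_filter = (name, ">=", 0)

pairs = []
for p in splitpoints:
  pairs.append([start_filter, (name, "<", p)])
  start_filter = (name, ">=", p)
pairs.append([start_filter, (name, "<=", 40)])

# pairs now covers [0, 10), [10, 20), [20, 30) and [30, 40]:
# contiguous, non overlapping, and closed at both ends of the original range.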
12,222
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/property_range.py
PropertyRange.make_query
def make_query(self, ns):
  """Make a query of entities within this range.

  Query options are not supported. They should be specified when the query
  is run.

  Args:
    ns: namespace of this query.

  Returns:
    a db.Query or ndb.Query, depending on the model class's type.
  """
  if issubclass(self.model_class, db.Model):
    query = db.Query(self.model_class, namespace=ns)
    for f in self.filters:
      query.filter("%s %s" % (f[0], f[1]), f[2])
  else:
    query = self.model_class.query(namespace=ns)
    for f in self.filters:
      query = query.filter(ndb.FilterNode(*f))
  return query
python
def make_query(self, ns):
  if issubclass(self.model_class, db.Model):
    query = db.Query(self.model_class, namespace=ns)
    for f in self.filters:
      query.filter("%s %s" % (f[0], f[1]), f[2])
  else:
    query = self.model_class.query(namespace=ns)
    for f in self.filters:
      query = query.filter(ndb.FilterNode(*f))
  return query
[ "def", "make_query", "(", "self", ",", "ns", ")", ":", "if", "issubclass", "(", "self", ".", "model_class", ",", "db", ".", "Model", ")", ":", "query", "=", "db", ".", "Query", "(", "self", ".", "model_class", ",", "namespace", "=", "ns", ")", "for...
Make a query of entities within this range.

Query options are not supported. They should be specified when the query is
run.

Args:
  ns: namespace of this query.

Returns:
  a db.Query or ndb.Query, depending on the model class's type.
[ "Make", "a", "query", "of", "entities", "within", "this", "range", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/property_range.py#L201-L221
12,223
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/api/map_job/output_writer.py
OutputWriter.commit_output
def commit_output(cls, shard_ctx, iterator):
  """Saves output references when a shard finishes.

  Inside end_shard(), an output writer can optionally use this method to
  persist some references to the outputs from this shard (e.g. a list of
  filenames).

  Args:
    shard_ctx: map_job_context.ShardContext for this shard.
    iterator: an iterator that yields json serializable references to the
      outputs from this shard. Contents from the iterator can be accessible
      later via map_job.Job.get_outputs.
  """
  # We accept an iterator just in case output references get too big.
  outs = tuple(iterator)
  shard_ctx._state.writer_state["outs"] = outs
python
def commit_output(cls, shard_ctx, iterator):
  # We accept an iterator just in case output references get too big.
  outs = tuple(iterator)
  shard_ctx._state.writer_state["outs"] = outs
[ "def", "commit_output", "(", "cls", ",", "shard_ctx", ",", "iterator", ")", ":", "# We accept an iterator just in case output references get too big.", "outs", "=", "tuple", "(", "iterator", ")", "shard_ctx", ".", "_state", ".", "writer_state", "[", "\"outs\"", "]", ...
Saves output references when a shard finishes.

Inside end_shard(), an output writer can optionally use this method to
persist some references to the outputs from this shard (e.g. a list of
filenames).

Args:
  shard_ctx: map_job_context.ShardContext for this shard.
  iterator: an iterator that yields json serializable references to the
    outputs from this shard. Contents from the iterator can be accessible
    later via map_job.Job.get_outputs.
[ "Saves", "output", "references", "when", "a", "shard", "finishes", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/output_writer.py#L111-L127
12,224
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/key_ranges.py
KeyRangesFactory.from_json
def from_json(cls, json):
  """Deserialize from json.

  Args:
    json: a dict of json compatible fields.

  Returns:
    a KeyRanges object.

  Raises:
    ValueError: if the json is invalid.
  """
  if json["name"] in _KEYRANGES_CLASSES:
    return _KEYRANGES_CLASSES[json["name"]].from_json(json)
  raise ValueError("Invalid json %s", json)
python
def from_json(cls, json):
  if json["name"] in _KEYRANGES_CLASSES:
    return _KEYRANGES_CLASSES[json["name"]].from_json(json)
  raise ValueError("Invalid json %s", json)
[ "def", "from_json", "(", "cls", ",", "json", ")", ":", "if", "json", "[", "\"name\"", "]", "in", "_KEYRANGES_CLASSES", ":", "return", "_KEYRANGES_CLASSES", "[", "json", "[", "\"name\"", "]", "]", ".", "from_json", "(", "json", ")", "raise", "ValueError", ...
Deserialize from json.

Args:
  json: a dict of json compatible fields.

Returns:
  a KeyRanges object.

Raises:
  ValueError: if the json is invalid.
[ "Deserialize", "from", "json", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/key_ranges.py#L58-L72
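from_json above dispatches on a registry dict mapping class names to classes, a common polymorphic-deserialization pattern. A generic sketch with hypothetical names (note that the original raise passes json as a second positional argument instead of %-formatting it, so the message renders as a tuple; the sketch formats it explicitly):

_CLASSES = {}

def _register(cls):
  """Class decorator that records a deserializable class by name."""
  _CLASSES[cls.__name__] = cls
  return cls

@_register
class ExampleRanges(object):
  @classmethod
  def from_json(cls, json):
    return cls()

def from_json(json):
  if json["name"] in _CLASSES:
    return _CLASSES[json["name"]].from_json(json)
  raise ValueError("Invalid json %s" % json)

obj = from_json({"name": "ExampleRanges"})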
12,225
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
split_into_sentences
def split_into_sentences(s):
  """Split text into list of sentences."""
  s = re.sub(r"\s+", " ", s)
  s = re.sub(r"[\\.\\?\\!]", "\n", s)
  return s.split("\n")
python
def split_into_sentences(s):
  s = re.sub(r"\s+", " ", s)
  s = re.sub(r"[\\.\\?\\!]", "\n", s)
  return s.split("\n")
[ "def", "split_into_sentences", "(", "s", ")", ":", "s", "=", "re", ".", "sub", "(", "r\"\\s+\"", ",", "\" \"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"[\\\\.\\\\?\\\\!]\"", ",", "\"\\n\"", ",", "s", ")", "return", "s", ".", "split", "("...
Split text into list of sentences.
[ "Split", "text", "into", "list", "of", "sentences", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L181-L185
12,226
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
split_into_words
def split_into_words(s):
  """Split a sentence into list of words."""
  s = re.sub(r"\W+", " ", s)
  s = re.sub(r"[_0-9]+", " ", s)
  return s.split()
python
def split_into_words(s):
  s = re.sub(r"\W+", " ", s)
  s = re.sub(r"[_0-9]+", " ", s)
  return s.split()
[ "def", "split_into_words", "(", "s", ")", ":", "s", "=", "re", ".", "sub", "(", "r\"\\W+\"", ",", "\" \"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"[_0-9]+\"", ",", "\" \"", ",", "s", ")", "return", "s", ".", "split", "(", ")" ]
Split a sentence into list of words.
[ "Split", "a", "sentence", "into", "list", "of", "words", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L188-L192
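A quick end-to-end illustration of the two demo helpers above (assumes split_into_sentences and split_into_words as defined in the records; the sample text is made up):

text = "Hello, world! How are you?  I am fine."
for sentence in split_into_sentences(text):
  print(split_into_words(sentence.lower()))
# -> ['hello', 'world']
#    ['how', 'are', 'you']
#    ['i', 'am', 'fine']
#    []   (trailing empty list from the final delimiter)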
12,227
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
index_map
def index_map(data):
  """Index demo map function."""
  (entry, text_fn) = data
  text = text_fn()

  logging.debug("Got %s", entry.filename)
  for s in split_into_sentences(text):
    for w in split_into_words(s.lower()):
      yield (w, entry.filename)
python
def index_map(data):
  (entry, text_fn) = data
  text = text_fn()

  logging.debug("Got %s", entry.filename)
  for s in split_into_sentences(text):
    for w in split_into_words(s.lower()):
      yield (w, entry.filename)
[ "def", "index_map", "(", "data", ")", ":", "(", "entry", ",", "text_fn", ")", "=", "data", "text", "=", "text_fn", "(", ")", "logging", ".", "debug", "(", "\"Got %s\"", ",", "entry", ".", "filename", ")", "for", "s", "in", "split_into_sentences", "(", ...
Index demo map function.
[ "Index", "demo", "map", "function", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L211-L219
12,228
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
phrases_map
def phrases_map(data):
  """Phrases demo map function."""
  (entry, text_fn) = data
  text = text_fn()
  filename = entry.filename

  logging.debug("Got %s", filename)
  for s in split_into_sentences(text):
    words = split_into_words(s.lower())
    if len(words) < PHRASE_LENGTH:
      yield (":".join(words), filename)
      continue
    for i in range(0, len(words) - PHRASE_LENGTH):
      yield (":".join(words[i:i+PHRASE_LENGTH]), filename)
python
def phrases_map(data):
  (entry, text_fn) = data
  text = text_fn()
  filename = entry.filename

  logging.debug("Got %s", filename)
  for s in split_into_sentences(text):
    words = split_into_words(s.lower())
    if len(words) < PHRASE_LENGTH:
      yield (":".join(words), filename)
      continue
    for i in range(0, len(words) - PHRASE_LENGTH):
      yield (":".join(words[i:i+PHRASE_LENGTH]), filename)
[ "def", "phrases_map", "(", "data", ")", ":", "(", "entry", ",", "text_fn", ")", "=", "data", "text", "=", "text_fn", "(", ")", "filename", "=", "entry", ".", "filename", "logging", ".", "debug", "(", "\"Got %s\"", ",", "filename", ")", "for", "s", "i...
Phrases demo map function.
[ "Phrases", "demo", "map", "function", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L230-L243
12,229
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
phrases_reduce
def phrases_reduce(key, values):
  """Phrases demo reduce function."""
  if len(values) < 10:
    return
  counts = {}
  for filename in values:
    counts[filename] = counts.get(filename, 0) + 1

  words = re.sub(r":", " ", key)
  threshold = len(values) / 2
  for filename, count in counts.items():
    if count > threshold:
      yield "%s:%s\n" % (words, filename)
python
def phrases_reduce(key, values):
  if len(values) < 10:
    return
  counts = {}
  for filename in values:
    counts[filename] = counts.get(filename, 0) + 1

  words = re.sub(r":", " ", key)
  threshold = len(values) / 2
  for filename, count in counts.items():
    if count > threshold:
      yield "%s:%s\n" % (words, filename)
[ "def", "phrases_reduce", "(", "key", ",", "values", ")", ":", "if", "len", "(", "values", ")", "<", "10", ":", "return", "counts", "=", "{", "}", "for", "filename", "in", "values", ":", "counts", "[", "filename", "]", "=", "counts", ".", "get", "("...
Phrases demo reduce function.
[ "Phrases", "demo", "reduce", "function", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L246-L258
12,230
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
FileMetadata.getKeyName
def getKeyName(username, date, blob_key):
  """Returns the internal key for a particular item in the database.

  Our items are stored with keys of the form 'user/date/blob_key' ('/' is
  not the real separator, but __SEP is).

  Args:
    username: The given user's e-mail address.
    date: A datetime object representing the date and time that an input
      file was uploaded to this app.
    blob_key: The blob key corresponding to the location of the input file
      in the Blobstore.

  Returns:
    The internal key for the item specified by (username, date, blob_key).
  """
  sep = FileMetadata.__SEP
  return str(username + sep + str(date) + sep + blob_key)
python
def getKeyName(username, date, blob_key):
  sep = FileMetadata.__SEP
  return str(username + sep + str(date) + sep + blob_key)
[ "def", "getKeyName", "(", "username", ",", "date", ",", "blob_key", ")", ":", "sep", "=", "FileMetadata", ".", "__SEP", "return", "str", "(", "username", "+", "sep", "+", "str", "(", "date", ")", "+", "sep", "+", "blob_key", ")" ]
Returns the internal key for a particular item in the database.

Our items are stored with keys of the form 'user/date/blob_key' ('/' is not
the real separator, but __SEP is).

Args:
  username: The given user's e-mail address.
  date: A datetime object representing the date and time that an input file
    was uploaded to this app.
  blob_key: The blob key corresponding to the location of the input file in
    the Blobstore.

Returns:
  The internal key for the item specified by (username, date, blob_key).
[ "Returns", "the", "internal", "key", "for", "a", "particular", "item", "in", "the", "database", "." ]
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L113-L130
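A sketch of the key shape. The real separator is the private `FileMetadata.__SEP`, which is not shown in the record, so the "/" here is purely illustrative.

```python
import datetime

SEP = "/"  # hypothetical; the actual FileMetadata.__SEP may differ
username = "user@example.com"
date = datetime.datetime(2024, 1, 2, 3, 4, 5)
blob_key = "blobkey123"

key = username + SEP + str(date) + SEP + blob_key
print(key)  # user@example.com/2024-01-02 03:04:05/blobkey123
```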
12,231
materialsproject/custodian
custodian/custodian.py
Custodian.from_spec
def from_spec(cls, spec): """ Load a Custodian instance where the jobs are specified from a structure and a spec dict. This allows simple custom job sequences to be constructed quickly via a YAML file. Args: spec (dict): A dict specifying jobs. A sample of the dict in YAML format for the usual MP workflow is given as follows ``` jobs: - jb: custodian.vasp.jobs.VaspJob params: final: False suffix: .relax1 - jb: custodian.vasp.jobs.VaspJob params: final: True suffix: .relax2 settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}} jobs_common_params: vasp_cmd: /opt/vasp handlers: - hdlr: custodian.vasp.handlers.VaspErrorHandler - hdlr: custodian.vasp.handlers.AliasingErrorHandler - hdlr: custodian.vasp.handlers.MeshSymmetryHandler validators: - vldr: custodian.vasp.validators.VasprunXMLValidator custodian_params: scratch_dir: /tmp ``` The `jobs` key is a list of jobs. Each job is specified via "jb": <explicit path>, and all parameters are specified via `params` which is a dict. `jobs_common_params` specifies a common set of parameters that are passed to all jobs, e.g., vasp_cmd. Returns: Custodian instance. """ dec = MontyDecoder() def load_class(dotpath): modname, classname = dotpath.rsplit(".", 1) mod = __import__(modname, globals(), locals(), [classname], 0) return getattr(mod, classname) def process_params(d): decoded = {} for k, v in d.items(): if k.startswith("$"): if isinstance(v, list): v = [os.path.expandvars(i) for i in v] elif isinstance(v, dict): v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()} else: v = os.path.expandvars(v) decoded[k.strip("$")] = dec.process_decoded(v) return decoded jobs = [] common_params = process_params(spec.get("jobs_common_params", {})) for d in spec["jobs"]: cls_ = load_class(d["jb"]) params = process_params(d.get("params", {})) params.update(common_params) jobs.append(cls_(**params)) handlers = [] for d in spec.get("handlers", []): cls_ = load_class(d["hdlr"]) params = process_params(d.get("params", {})) handlers.append(cls_(**params)) validators = [] for d in spec.get("validators", []): cls_ = load_class(d["vldr"]) params = process_params(d.get("params", {})) validators.append(cls_(**params)) custodian_params = process_params(spec.get("custodian_params", {})) return cls(jobs=jobs, handlers=handlers, validators=validators, **custodian_params)
python
def from_spec(cls, spec): dec = MontyDecoder() def load_class(dotpath): modname, classname = dotpath.rsplit(".", 1) mod = __import__(modname, globals(), locals(), [classname], 0) return getattr(mod, classname) def process_params(d): decoded = {} for k, v in d.items(): if k.startswith("$"): if isinstance(v, list): v = [os.path.expandvars(i) for i in v] elif isinstance(v, dict): v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()} else: v = os.path.expandvars(v) decoded[k.strip("$")] = dec.process_decoded(v) return decoded jobs = [] common_params = process_params(spec.get("jobs_common_params", {})) for d in spec["jobs"]: cls_ = load_class(d["jb"]) params = process_params(d.get("params", {})) params.update(common_params) jobs.append(cls_(**params)) handlers = [] for d in spec.get("handlers", []): cls_ = load_class(d["hdlr"]) params = process_params(d.get("params", {})) handlers.append(cls_(**params)) validators = [] for d in spec.get("validators", []): cls_ = load_class(d["vldr"]) params = process_params(d.get("params", {})) validators.append(cls_(**params)) custodian_params = process_params(spec.get("custodian_params", {})) return cls(jobs=jobs, handlers=handlers, validators=validators, **custodian_params)
[ "def", "from_spec", "(", "cls", ",", "spec", ")", ":", "dec", "=", "MontyDecoder", "(", ")", "def", "load_class", "(", "dotpath", ")", ":", "modname", ",", "classname", "=", "dotpath", ".", "rsplit", "(", "\".\"", ",", "1", ")", "mod", "=", "__import...
Load a Custodian instance where the jobs are specified from a structure and a spec dict. This allows simple custom job sequences to be constructed quickly via a YAML file. Args: spec (dict): A dict specifying jobs. A sample of the dict in YAML format for the usual MP workflow is given as follows ``` jobs: - jb: custodian.vasp.jobs.VaspJob params: final: False suffix: .relax1 - jb: custodian.vasp.jobs.VaspJob params: final: True suffix: .relax2 settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}} jobs_common_params: vasp_cmd: /opt/vasp handlers: - hdlr: custodian.vasp.handlers.VaspErrorHandler - hdlr: custodian.vasp.handlers.AliasingErrorHandler - hdlr: custodian.vasp.handlers.MeshSymmetryHandler validators: - vldr: custodian.vasp.validators.VasprunXMLValidator custodian_params: scratch_dir: /tmp ``` The `jobs` key is a list of jobs. Each job is specified via "jb": <explicit path>, and all parameters are specified via `params` which is a dict. `jobs_common_params` specifies a common set of parameters that are passed to all jobs, e.g., vasp_cmd. Returns: Custodian instance.
[ "Load", "a", "Custodian", "instance", "where", "the", "jobs", "are", "specified", "from", "a", "structure", "and", "a", "spec", "dict", ".", "This", "allows", "simple", "custom", "job", "sequences", "to", "be", "constructed", "quickly", "via", "a", "YAML", ...
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L204-L292
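A minimal sketch of the intended round trip: load a YAML spec and hand the resulting dict to `from_spec`. Assumes `custodian` and PyYAML are installed; "my_spec.yaml" is a hypothetical file following the schema in the docstring above.

```python
import yaml
from custodian.custodian import Custodian

# Hypothetical spec file with jobs/handlers/validators/custodian_params keys.
with open("my_spec.yaml") as f:
    spec = yaml.safe_load(f)

c = Custodian.from_spec(spec)
c.run()
```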
12,232
materialsproject/custodian
custodian/custodian.py
Custodian.run
def run(self): """ Runs all jobs. Returns: All errors encountered as a list of list. [[error_dicts for job 1], [error_dicts for job 2], ....] Raises: ValidationError: if a job fails validation ReturnCodeError: if the process has a return code different from 0 NonRecoverableError: if an unrecoverable occurs MaxCorrectionsPerJobError: if max_errors_per_job is reached MaxCorrectionsError: if max_errors is reached MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached """ cwd = os.getcwd() with ScratchDir(self.scratch_dir, create_symbolic_link=True, copy_to_current_on_exit=True, copy_from_current_on_enter=True) as temp_dir: self.total_errors = 0 start = datetime.datetime.now() logger.info("Run started at {} in {}.".format( start, temp_dir)) v = sys.version.replace("\n", " ") logger.info("Custodian running on Python version {}".format(v)) logger.info("Hostname: {}, Cluster: {}".format( *get_execution_host_info())) try: # skip jobs until the restart for job_n, job in islice(enumerate(self.jobs, 1), self.restart, None): self._run_job(job_n, job) # We do a dump of the run log after each job. dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) # Checkpoint after each job so that we can recover from last # point and remove old checkpoints if self.checkpoint: self.restart = job_n Custodian._save_checkpoint(cwd, job_n) except CustodianError as ex: logger.error(ex.message) if ex.raises: raise finally: # Log the corrections to a json file. logger.info("Logging to {}...".format(Custodian.LOG_FILE)) dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) end = datetime.datetime.now() logger.info("Run ended at {}.".format(end)) run_time = end - start logger.info("Run completed. Total time taken = {}." .format(run_time)) if self.gzipped_output: gzip_dir(".") # Cleanup checkpoint files (if any) if run is successful. Custodian._delete_checkpoints(cwd) return self.run_log
python
def run(self): cwd = os.getcwd() with ScratchDir(self.scratch_dir, create_symbolic_link=True, copy_to_current_on_exit=True, copy_from_current_on_enter=True) as temp_dir: self.total_errors = 0 start = datetime.datetime.now() logger.info("Run started at {} in {}.".format( start, temp_dir)) v = sys.version.replace("\n", " ") logger.info("Custodian running on Python version {}".format(v)) logger.info("Hostname: {}, Cluster: {}".format( *get_execution_host_info())) try: # skip jobs until the restart for job_n, job in islice(enumerate(self.jobs, 1), self.restart, None): self._run_job(job_n, job) # We do a dump of the run log after each job. dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) # Checkpoint after each job so that we can recover from last # point and remove old checkpoints if self.checkpoint: self.restart = job_n Custodian._save_checkpoint(cwd, job_n) except CustodianError as ex: logger.error(ex.message) if ex.raises: raise finally: # Log the corrections to a json file. logger.info("Logging to {}...".format(Custodian.LOG_FILE)) dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) end = datetime.datetime.now() logger.info("Run ended at {}.".format(end)) run_time = end - start logger.info("Run completed. Total time taken = {}." .format(run_time)) if self.gzipped_output: gzip_dir(".") # Cleanup checkpoint files (if any) if run is successful. Custodian._delete_checkpoints(cwd) return self.run_log
[ "def", "run", "(", "self", ")", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "with", "ScratchDir", "(", "self", ".", "scratch_dir", ",", "create_symbolic_link", "=", "True", ",", "copy_to_current_on_exit", "=", "True", ",", "copy_from_current_on_enter", "...
Runs all jobs. Returns: All errors encountered as a list of lists. [[error_dicts for job 1], [error_dicts for job 2], ...] Raises: ValidationError: if a job fails validation ReturnCodeError: if the process has a return code different from 0 NonRecoverableError: if an unrecoverable error occurs MaxCorrectionsPerJobError: if max_errors_per_job is reached MaxCorrectionsError: if max_errors is reached MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
[ "Runs", "all", "jobs", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L294-L357
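The usual driver pattern around `run()`, as a sketch; the handler and job classes are real custodian classes, but the VASP command is illustrative and site-specific.

```python
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.jobs import VaspJob

handlers = [VaspErrorHandler()]
jobs = [VaspJob(["mpirun", "vasp_std"])]  # command is site-specific

c = Custodian(handlers, jobs, max_errors=5)
run_log = c.run()  # per-job error dicts; also dumped to custodian.json
```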
12,233
materialsproject/custodian
custodian/custodian.py
Custodian._do_check
def _do_check(self, handlers, terminate_func=None): """ checks the specified handlers. Returns True iff errors caught """ corrections = [] for h in handlers: try: if h.check(): if h.max_num_corrections is not None \ and h.n_applied_corrections >= h.max_num_corrections: msg = "Maximum number of corrections {} reached " \ "for handler {}".format(h.max_num_corrections, h) if h.raise_on_max: self.run_log[-1]["handler"] = h self.run_log[-1]["max_errors_per_handler"] = True raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h) else: logger.warning(msg+" Correction not applied.") continue if terminate_func is not None and h.is_terminating: logger.info("Terminating job") terminate_func() # make sure we don't terminate twice terminate_func = None d = h.correct() d["handler"] = h logger.error("\n" + pformat(d, indent=2, width=-1)) corrections.append(d) h.n_applied_corrections += 1 except Exception: if not self.skip_over_errors: raise else: import traceback logger.error("Bad handler %s " % h) logger.error(traceback.format_exc()) corrections.append( {"errors": ["Bad handler %s " % h], "actions": []}) self.total_errors += len(corrections) self.errors_current_job += len(corrections) self.run_log[-1]["corrections"].extend(corrections) # We do a dump of the run log after each check. dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) return len(corrections) > 0
python
def _do_check(self, handlers, terminate_func=None): corrections = [] for h in handlers: try: if h.check(): if h.max_num_corrections is not None \ and h.n_applied_corrections >= h.max_num_corrections: msg = "Maximum number of corrections {} reached " \ "for handler {}".format(h.max_num_corrections, h) if h.raise_on_max: self.run_log[-1]["handler"] = h self.run_log[-1]["max_errors_per_handler"] = True raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h) else: logger.warning(msg+" Correction not applied.") continue if terminate_func is not None and h.is_terminating: logger.info("Terminating job") terminate_func() # make sure we don't terminate twice terminate_func = None d = h.correct() d["handler"] = h logger.error("\n" + pformat(d, indent=2, width=-1)) corrections.append(d) h.n_applied_corrections += 1 except Exception: if not self.skip_over_errors: raise else: import traceback logger.error("Bad handler %s " % h) logger.error(traceback.format_exc()) corrections.append( {"errors": ["Bad handler %s " % h], "actions": []}) self.total_errors += len(corrections) self.errors_current_job += len(corrections) self.run_log[-1]["corrections"].extend(corrections) # We do a dump of the run log after each check. dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) return len(corrections) > 0
[ "def", "_do_check", "(", "self", ",", "handlers", ",", "terminate_func", "=", "None", ")", ":", "corrections", "=", "[", "]", "for", "h", "in", "handlers", ":", "try", ":", "if", "h", ".", "check", "(", ")", ":", "if", "h", ".", "max_num_corrections"...
Checks the specified handlers. Returns True iff errors were caught
[ "Checks", "the", "specified", "handlers", ".", "Returns", "True", "iff", "errors", "were", "caught" ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/custodian.py#L598-L643
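A toy handler illustrating the contract `_do_check` relies on: `check()` detects the problem, `correct()` applies a fix and returns an errors/actions dict that lands in `run_log[-1]["corrections"]`, and `is_monitor`/`is_terminating` control when and how the handler fires. The disk-space logic is invented for illustration only.

```python
import shutil
from custodian.custodian import ErrorHandler

class DiskFullHandler(ErrorHandler):
    """Hypothetical handler: fires when free disk space runs low."""
    is_monitor = True  # polled while the job is still running

    def check(self):
        return shutil.disk_usage(".").free < 1024 ** 3  # under 1 GiB free

    def correct(self):
        # Whatever cleanup happened would be summarized here; the returned
        # dict is appended to the run log by _do_check.
        return {"errors": ["disk nearly full"],
                "actions": ["removed scratch files"]}
```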
12,234
materialsproject/custodian
custodian/feff/interpreter.py
FeffModder.apply_actions
def apply_actions(self, actions): """ Applies a list of actions to the FEFF Input Set and rewrites modified files. Args: actions [dict]: A list of actions of the form {'file': filename, 'action': moddermodification} or {'dict': feffinput_key, 'action': moddermodification} """ modified = [] for a in actions: if "dict" in a: k = a["dict"] modified.append(k) self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k]) elif "file" in a: self.modify(a["action"], a["file"]) else: raise ValueError("Unrecognized format: {}".format(a)) if modified: feff = self.feffinp feff_input = "\n\n".join(str(feff[k]) for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"] if k in feff) for k, v in six.iteritems(feff): with open(os.path.join('.', k), "w") as f: f.write(str(v)) with open(os.path.join('.', "feff.inp"), "w") as f: f.write(feff_input)
python
def apply_actions(self, actions): modified = [] for a in actions: if "dict" in a: k = a["dict"] modified.append(k) self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k]) elif "file" in a: self.modify(a["action"], a["file"]) else: raise ValueError("Unrecognized format: {}".format(a)) if modified: feff = self.feffinp feff_input = "\n\n".join(str(feff[k]) for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"] if k in feff) for k, v in six.iteritems(feff): with open(os.path.join('.', k), "w") as f: f.write(str(v)) with open(os.path.join('.', "feff.inp"), "w") as f: f.write(feff_input)
[ "def", "apply_actions", "(", "self", ",", "actions", ")", ":", "modified", "=", "[", "]", "for", "a", "in", "actions", ":", "if", "\"dict\"", "in", "a", ":", "k", "=", "a", "[", "\"dict\"", "]", "modified", ".", "append", "(", "k", ")", "self", "...
Applies a list of actions to the FEFF Input Set and rewrites modified files. Args: actions [dict]: A list of actions of the form {'file': filename, 'action': moddermodification} or {'dict': feffinput_key, 'action': moddermodification}
[ "Applies", "a", "list", "of", "actions", "to", "the", "FEFF", "Input", "Set", "and", "rewrites", "modified", "files", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/feff/interpreter.py#L35-L65
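A sketch of the two action shapes `apply_actions` accepts: dict-based edits keyed on a FEFF input section, and file-based actions. The section names and values are assumptions, and the `_set`/`_file_copy` syntax follows the Modder conventions shown later in this section.

```python
# Illustrative action list; section names and values are assumptions.
actions = [
    {"dict": "PARAMETERS", "action": {"_set": {"SCF": "7.0 0 100 0.2 3"}}},
    {"file": "feff.inp", "action": {"_file_copy": {"dest": "feff.inp.bak"}}},
]
# modder = FeffModder(feffinp=my_feff_input_dict)  # hypothetical setup
# modder.apply_actions(actions)
```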
12,235
materialsproject/custodian
custodian/ansible/actions.py
FileActions.file_modify
def file_modify(filename, settings): """ Modifies file access Args: filename (str): Filename. settings (dict): Can be "mode" or "owners" """ for k, v in settings.items(): if k == "mode": os.chmod(filename, v) if k == "owners": os.chown(filename, v[0], v[1])
python
def file_modify(filename, settings): for k, v in settings.items(): if k == "mode": os.chmod(filename, v) if k == "owners": os.chown(filename, v[0], v[1])
[ "def", "file_modify", "(", "filename", ",", "settings", ")", ":", "for", "k", ",", "v", "in", "settings", ".", "items", "(", ")", ":", "if", "k", "==", "\"mode\"", ":", "os", ".", "chmod", "(", "filename", ",", "v", ")", "if", "k", "==", "\"owner...
Modifies file access Args: filename (str): Filename. settings (dict): Can be "mode" or "owners"
[ "Modifies", "file", "access" ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L223-L235
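A usage sketch: "mode" feeds `os.chmod` and "owners" must carry the `(uid, gid)` pair that `os.chown` expects (hence the unpacking in the record above). Unix-only, since `os.chown` and `os.getuid` are POSIX calls.

```python
import os
from custodian.ansible.actions import FileActions

FileActions.file_modify("results.txt",
                        {"mode": 0o644,
                         "owners": (os.getuid(), os.getgid())})
```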
12,236
materialsproject/custodian
custodian/ansible/interpreter.py
Modder.modify
def modify(self, modification, obj): """ Note that modify makes actual in-place modifications. It does not return a copy. Args: modification (dict): Modification must be {action_keyword : settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}} obj (dict/str/object): Object to modify depending on actions. For example, for DictActions, obj will be a dict to be modified. For FileActions, obj will be a string with a full pathname to a file. """ for action, settings in modification.items(): if action in self.supported_actions: self.supported_actions[action].__call__(obj, settings) elif self.strict: raise ValueError("{} is not a supported action!" .format(action))
python
def modify(self, modification, obj): for action, settings in modification.items(): if action in self.supported_actions: self.supported_actions[action].__call__(obj, settings) elif self.strict: raise ValueError("{} is not a supported action!" .format(action))
[ "def", "modify", "(", "self", ",", "modification", ",", "obj", ")", ":", "for", "action", ",", "settings", "in", "modification", ".", "items", "(", ")", ":", "if", "action", "in", "self", ".", "supported_actions", ":", "self", ".", "supported_actions", "...
Note that modify makes actual in-place modifications. It does not return a copy. Args: modification (dict): Modification must be {action_keyword : settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}} obj (dict/str/object): Object to modify depending on actions. For example, for DictActions, obj will be a dict to be modified. For FileActions, obj will be a string with a full pathname to a file.
[ "Note", "that", "modify", "makes", "actual", "in", "-", "place", "modifications", ".", "It", "does", "not", "return", "a", "copy", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/interpreter.py#L67-L85
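The docstring's own example, runnable as a sketch with the default `Modder` (whose supported actions come from `DictActions`); note the dict is modified in place rather than returned.

```python
from custodian.ansible.interpreter import Modder

d = {"Hello": "World"}
Modder().modify({"_set": {"Hello": "Universe", "Bye": "World"}}, d)
print(d)  # {'Hello': 'Universe', 'Bye': 'World'}
```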
12,237
materialsproject/custodian
custodian/utils.py
backup
def backup(filenames, prefix="error"): """ Backup files to a tar.gz file. Used, for example, in backing up the files of an errored run before performing corrections. Args: filenames ([str]): List of files to backup. Supports wildcards, e.g., *.*. prefix (str): prefix to the files. Defaults to error, which means a series of error.1.tar.gz, error.2.tar.gz, ... will be generated. """ num = max([0] + [int(f.split(".")[1]) for f in glob("{}.*.tar.gz".format(prefix))]) filename = "{}.{}.tar.gz".format(prefix, num + 1) logging.info("Backing up run to {}.".format(filename)) with tarfile.open(filename, "w:gz") as tar: for fname in filenames: for f in glob(fname): tar.add(f)
python
def backup(filenames, prefix="error"): num = max([0] + [int(f.split(".")[1]) for f in glob("{}.*.tar.gz".format(prefix))]) filename = "{}.{}.tar.gz".format(prefix, num + 1) logging.info("Backing up run to {}.".format(filename)) with tarfile.open(filename, "w:gz") as tar: for fname in filenames: for f in glob(fname): tar.add(f)
[ "def", "backup", "(", "filenames", ",", "prefix", "=", "\"error\"", ")", ":", "num", "=", "max", "(", "[", "0", "]", "+", "[", "int", "(", "f", ".", "split", "(", "\".\"", ")", "[", "1", "]", ")", "for", "f", "in", "glob", "(", "\"{}.*.tar.gz\"...
Backup files to a tar.gz file. Used, for example, in backing up the files of an errored run before performing corrections. Args: filenames ([str]): List of files to backup. Supports wildcards, e.g., *.*. prefix (str): prefix to the files. Defaults to error, which means a series of error.1.tar.gz, error.2.tar.gz, ... will be generated.
[ "Backup", "files", "to", "a", "tar", ".", "gz", "file", ".", "Used", "for", "example", "in", "backing", "up", "the", "files", "of", "an", "errored", "run", "before", "performing", "corrections", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/utils.py#L23-L41
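A sketch of the numbered-archive behaviour: each call scans for existing `<prefix>.N.tar.gz` files and writes the next N, so repeated failures stack up as error.1.tar.gz, error.2.tar.gz, and so on. File names are illustrative.

```python
from custodian.utils import backup

backup(["INCAR", "POSCAR", "*.out"], prefix="error")
# first call  -> error.1.tar.gz
# second call -> error.2.tar.gz
```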
12,238
materialsproject/custodian
custodian/utils.py
get_execution_host_info
def get_execution_host_info(): """ Tries to return a tuple describing the execution host. Doesn't work for all queueing systems Returns: (HOSTNAME, CLUSTER_NAME) """ host = os.environ.get('HOSTNAME', None) cluster = os.environ.get('SGE_O_HOST', None) if host is None: try: import socket host = host or socket.gethostname() except: pass return host or 'unknown', cluster or 'unknown'
python
def get_execution_host_info(): host = os.environ.get('HOSTNAME', None) cluster = os.environ.get('SGE_O_HOST', None) if host is None: try: import socket host = host or socket.gethostname() except: pass return host or 'unknown', cluster or 'unknown'
[ "def", "get_execution_host_info", "(", ")", ":", "host", "=", "os", ".", "environ", ".", "get", "(", "'HOSTNAME'", ",", "None", ")", "cluster", "=", "os", ".", "environ", ".", "get", "(", "'SGE_O_HOST'", ",", "None", ")", "if", "host", "is", "None", ...
Tries to return a tuple describing the execution host. Doesn't work for all queueing systems Returns: (HOSTNAME, CLUSTER_NAME)
[ "Tries", "to", "return", "a", "tuple", "describing", "the", "execution", "host", ".", "Doesn", "t", "work", "for", "all", "queueing", "systems" ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/utils.py#L44-L60
12,239
materialsproject/custodian
custodian/qchem/jobs.py
QCJob.run
def run(self): """ Perform the actual QChem run. Returns: (subprocess.Popen) Used for monitoring. """ qclog = open(self.qclog_file, 'w') p = subprocess.Popen(self.current_command, stdout=qclog) return p
python
def run(self): qclog = open(self.qclog_file, 'w') p = subprocess.Popen(self.current_command, stdout=qclog) return p
[ "def", "run", "(", "self", ")", ":", "qclog", "=", "open", "(", "self", ".", "qclog_file", ",", "'w'", ")", "p", "=", "subprocess", ".", "Popen", "(", "self", ".", "current_command", ",", "stdout", "=", "qclog", ")", "return", "p" ]
Perform the actual QChem run. Returns: (subprocess.Popen) Used for monitoring.
[ "Perform", "the", "actual", "QChem", "run", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/qchem/jobs.py#L120-L129
12,240
materialsproject/custodian
custodian/vasp/jobs.py
VaspJob.setup
def setup(self): """ Performs initial setup for VaspJob, including overriding any settings and backing up. """ decompress_dir('.') if self.backup: for f in VASP_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) if self.auto_npar: try: incar = Incar.from_file("INCAR") # Only optimized NPAR for non-HF and non-RPA calculations. if not (incar.get("LHFCALC") or incar.get("LRPA") or incar.get("LEPSILON")): if incar.get("IBRION") in [5, 6, 7, 8]: # NPAR should not be set for Hessian matrix # calculations, whether in DFPT or otherwise. del incar["NPAR"] else: import multiprocessing # try sge environment variable first # (since multiprocessing counts cores on the current # machine only) ncores = os.environ.get('NSLOTS') or \ multiprocessing.cpu_count() ncores = int(ncores) for npar in range(int(math.sqrt(ncores)), ncores): if ncores % npar == 0: incar["NPAR"] = npar break incar.write_file("INCAR") except: pass if self.auto_continue: if os.path.exists("continue.json"): actions = loadfn("continue.json").get("actions") logger.info("Continuing previous VaspJob. Actions: {}".format(actions)) backup(VASP_BACKUP_FILES, prefix="prev_run") VaspModder().apply_actions(actions) else: # Default functionality is to copy CONTCAR to POSCAR and set # ISTART to 1 in the INCAR, but other actions can be specified if self.auto_continue is True: actions = [{"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}, {"dict": "INCAR", "action": {"_set": {"ISTART": 1}}}] else: actions = self.auto_continue dumpfn({"actions": actions}, "continue.json") if self.settings_override is not None: VaspModder().apply_actions(self.settings_override)
python
def setup(self): decompress_dir('.') if self.backup: for f in VASP_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) if self.auto_npar: try: incar = Incar.from_file("INCAR") # Only optimized NPAR for non-HF and non-RPA calculations. if not (incar.get("LHFCALC") or incar.get("LRPA") or incar.get("LEPSILON")): if incar.get("IBRION") in [5, 6, 7, 8]: # NPAR should not be set for Hessian matrix # calculations, whether in DFPT or otherwise. del incar["NPAR"] else: import multiprocessing # try sge environment variable first # (since multiprocessing counts cores on the current # machine only) ncores = os.environ.get('NSLOTS') or \ multiprocessing.cpu_count() ncores = int(ncores) for npar in range(int(math.sqrt(ncores)), ncores): if ncores % npar == 0: incar["NPAR"] = npar break incar.write_file("INCAR") except: pass if self.auto_continue: if os.path.exists("continue.json"): actions = loadfn("continue.json").get("actions") logger.info("Continuing previous VaspJob. Actions: {}".format(actions)) backup(VASP_BACKUP_FILES, prefix="prev_run") VaspModder().apply_actions(actions) else: # Default functionality is to copy CONTCAR to POSCAR and set # ISTART to 1 in the INCAR, but other actions can be specified if self.auto_continue is True: actions = [{"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}, {"dict": "INCAR", "action": {"_set": {"ISTART": 1}}}] else: actions = self.auto_continue dumpfn({"actions": actions}, "continue.json") if self.settings_override is not None: VaspModder().apply_actions(self.settings_override)
[ "def", "setup", "(", "self", ")", ":", "decompress_dir", "(", "'.'", ")", "if", "self", ".", "backup", ":", "for", "f", "in", "VASP_INPUT_FILES", ":", "shutil", ".", "copy", "(", "f", ",", "\"{}.orig\"", ".", "format", "(", "f", ")", ")", "if", "se...
Performs initial setup for VaspJob, including overriding any settings and backing up.
[ "Performs", "initial", "setup", "for", "VaspJob", "including", "overriding", "any", "settings", "and", "backing", "up", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L131-L189
12,241
materialsproject/custodian
custodian/vasp/jobs.py
VaspJob.run
def run(self): """ Perform the actual VASP run. Returns: (subprocess.Popen) Used for monitoring. """ cmd = list(self.vasp_cmd) if self.auto_gamma: vi = VaspInput.from_directory(".") kpts = vi["KPOINTS"] if kpts.style == Kpoints.supported_modes.Gamma \ and tuple(kpts.kpts[0]) == (1, 1, 1): if self.gamma_vasp_cmd is not None and which( self.gamma_vasp_cmd[-1]): cmd = self.gamma_vasp_cmd elif which(cmd[-1] + ".gamma"): cmd[-1] += ".gamma" logger.info("Running {}".format(" ".join(cmd))) with open(self.output_file, 'w') as f_std, \ open(self.stderr_file, "w", buffering=1) as f_err: # use line buffering for stderr p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err) return p
python
def run(self): cmd = list(self.vasp_cmd) if self.auto_gamma: vi = VaspInput.from_directory(".") kpts = vi["KPOINTS"] if kpts.style == Kpoints.supported_modes.Gamma \ and tuple(kpts.kpts[0]) == (1, 1, 1): if self.gamma_vasp_cmd is not None and which( self.gamma_vasp_cmd[-1]): cmd = self.gamma_vasp_cmd elif which(cmd[-1] + ".gamma"): cmd[-1] += ".gamma" logger.info("Running {}".format(" ".join(cmd))) with open(self.output_file, 'w') as f_std, \ open(self.stderr_file, "w", buffering=1) as f_err: # use line buffering for stderr p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err) return p
[ "def", "run", "(", "self", ")", ":", "cmd", "=", "list", "(", "self", ".", "vasp_cmd", ")", "if", "self", ".", "auto_gamma", ":", "vi", "=", "VaspInput", ".", "from_directory", "(", "\".\"", ")", "kpts", "=", "vi", "[", "\"KPOINTS\"", "]", "if", "k...
Perform the actual VASP run. Returns: (subprocess.Popen) Used for monitoring.
[ "Perform", "the", "actual", "VASP", "run", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L191-L214
12,242
materialsproject/custodian
custodian/vasp/jobs.py
VaspJob.postprocess
def postprocess(self): """ Postprocessing includes renaming and gzipping where necessary. Also copies the magmom to the incar if necessary """ for f in VASP_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) if self.copy_magmom and not self.final: try: outcar = Outcar("OUTCAR") magmom = [m['tot'] for m in outcar.magnetization] incar = Incar.from_file("INCAR") incar['MAGMOM'] = magmom incar.write_file("INCAR") except: logger.error('MAGMOM copy from OUTCAR to INCAR failed') # Remove continuation so if a subsequent job is run in # the same directory, will not restart this job. if os.path.exists("continue.json"): os.remove("continue.json")
python
def postprocess(self): for f in VASP_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) if self.copy_magmom and not self.final: try: outcar = Outcar("OUTCAR") magmom = [m['tot'] for m in outcar.magnetization] incar = Incar.from_file("INCAR") incar['MAGMOM'] = magmom incar.write_file("INCAR") except: logger.error('MAGMOM copy from OUTCAR to INCAR failed') # Remove continuation so if a subsequent job is run in # the same directory, will not restart this job. if os.path.exists("continue.json"): os.remove("continue.json")
[ "def", "postprocess", "(", "self", ")", ":", "for", "f", "in", "VASP_OUTPUT_FILES", "+", "[", "self", ".", "output_file", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "if", "self", ".", "final", "and", "self", ".", "suffix"...
Postprocessing includes renaming and gzipping where necessary. Also copies the magmom to the incar if necessary
[ "Postprocessing", "includes", "renaming", "and", "gzipping", "where", "necessary", ".", "Also", "copies", "the", "magmom", "to", "the", "incar", "if", "necessary" ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L216-L241
12,243
materialsproject/custodian
custodian/vasp/jobs.py
VaspJob.double_relaxation_run
def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False): """ Returns a list of two jobs corresponding to an AFLOW style double relaxation run. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] auto_npar (bool): Whether to automatically tune NPAR to be sqrt( number of cores) as recommended by VASP for DFT calculations. Generally, this results in significant speedups. Defaults to True. Set to False for HF, GW and RPA calculations. ediffg (float): Force convergence criteria for subsequent runs ( ignored for the initial run.) half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. Returns: List of two jobs corresponding to an AFLOW style run. """ incar_update = {"ISTART": 1} if ediffg: incar_update["EDIFFG"] = ediffg settings_overide_1 = None settings_overide_2 = [ {"dict": "INCAR", "action": {"_set": incar_update}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if half_kpts_first_relax and os.path.exists("KPOINTS") and \ os.path.exists("POSCAR"): kpts = Kpoints.from_file("KPOINTS") orig_kpts_dict = kpts.as_dict() # lattice vectors with length < 8 will get >1 KPOINT kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2, 1)).astype(int).tolist() low_kpts_dict = kpts.as_dict() settings_overide_1 = [ {"dict": "KPOINTS", "action": {"_set": low_kpts_dict}} ] settings_overide_2.append( {"dict": "KPOINTS", "action": {"_set": orig_kpts_dict}} ) return [VaspJob(vasp_cmd, final=False, suffix=".relax1", auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_1), VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2", auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_2)]
python
def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False): incar_update = {"ISTART": 1} if ediffg: incar_update["EDIFFG"] = ediffg settings_overide_1 = None settings_overide_2 = [ {"dict": "INCAR", "action": {"_set": incar_update}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if half_kpts_first_relax and os.path.exists("KPOINTS") and \ os.path.exists("POSCAR"): kpts = Kpoints.from_file("KPOINTS") orig_kpts_dict = kpts.as_dict() # lattice vectors with length < 8 will get >1 KPOINT kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2, 1)).astype(int).tolist() low_kpts_dict = kpts.as_dict() settings_overide_1 = [ {"dict": "KPOINTS", "action": {"_set": low_kpts_dict}} ] settings_overide_2.append( {"dict": "KPOINTS", "action": {"_set": orig_kpts_dict}} ) return [VaspJob(vasp_cmd, final=False, suffix=".relax1", auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_1), VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2", auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_2)]
[ "def", "double_relaxation_run", "(", "cls", ",", "vasp_cmd", ",", "auto_npar", "=", "True", ",", "ediffg", "=", "-", "0.05", ",", "half_kpts_first_relax", "=", "False", ",", "auto_continue", "=", "False", ")", ":", "incar_update", "=", "{", "\"ISTART\"", ":"...
Returns a list of two jobs corresponding to an AFLOW style double relaxation run. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] auto_npar (bool): Whether to automatically tune NPAR to be sqrt( number of cores) as recommended by VASP for DFT calculations. Generally, this results in significant speedups. Defaults to True. Set to False for HF, GW and RPA calculations. ediffg (float): Force convergence criteria for subsequent runs ( ignored for the initial run.) half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. Returns: List of two jobs corresponding to an AFLOW style run.
[ "Returns", "a", "list", "of", "two", "jobs", "corresponding", "to", "an", "AFLOW", "style", "double", "relaxation", "run", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L244-L298
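Wiring the two-step relaxation into a Custodian run, as a sketch; the command and handler choice are illustrative.

```python
from custodian.custodian import Custodian
from custodian.vasp.handlers import VaspErrorHandler
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.double_relaxation_run(["mpirun", "vasp_std"],
                                     half_kpts_first_relax=True)
Custodian([VaspErrorHandler()], jobs).run()
# produces *.relax1 and *.relax2 suffixed outputs
```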
12,244
materialsproject/custodian
custodian/vasp/jobs.py
VaspJob.metagga_opt_run
def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False): """ Returns a list of thres jobs to perform an optimization for any metaGGA functional. There is an initial calculation of the GGA wavefunction which is fed into the initial metaGGA optimization to precondition the electronic structure optimizer. The metaGGA optimization is performed using the double relaxation scheme """ incar = Incar.from_file("INCAR") # Defaults to using the SCAN metaGGA metaGGA = incar.get("METAGGA", "SCAN") # Pre optimze WAVECAR and structure using regular GGA pre_opt_setings = [{"dict": "INCAR", "action": {"_set": {"METAGGA": None, "LWAVE": True, "NSW": 0}}}] jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, final=False, suffix=".precondition", settings_override=pre_opt_setings)] # Finish with regular double relaxation style run using SCAN jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar, ediffg=ediffg, half_kpts_first_relax=half_kpts_first_relax)) # Ensure the first relaxation doesn't overwrite the original inputs jobs[1].backup = False # Update double_relaxation job to start from pre-optimized run post_opt_settings = [{"dict": "INCAR", "action": {"_set": {"METAGGA": metaGGA, "ISTART": 1, "NSW": incar.get("NSW", 99), "LWAVE": incar.get("LWAVE", False)}}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if jobs[1].settings_override: post_opt_settings = jobs[1].settings_override + post_opt_settings jobs[1].settings_override = post_opt_settings return jobs
python
def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False): incar = Incar.from_file("INCAR") # Defaults to using the SCAN metaGGA metaGGA = incar.get("METAGGA", "SCAN") # Pre optimze WAVECAR and structure using regular GGA pre_opt_setings = [{"dict": "INCAR", "action": {"_set": {"METAGGA": None, "LWAVE": True, "NSW": 0}}}] jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, final=False, suffix=".precondition", settings_override=pre_opt_setings)] # Finish with regular double relaxation style run using SCAN jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar, ediffg=ediffg, half_kpts_first_relax=half_kpts_first_relax)) # Ensure the first relaxation doesn't overwrite the original inputs jobs[1].backup = False # Update double_relaxation job to start from pre-optimized run post_opt_settings = [{"dict": "INCAR", "action": {"_set": {"METAGGA": metaGGA, "ISTART": 1, "NSW": incar.get("NSW", 99), "LWAVE": incar.get("LWAVE", False)}}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if jobs[1].settings_override: post_opt_settings = jobs[1].settings_override + post_opt_settings jobs[1].settings_override = post_opt_settings return jobs
[ "def", "metagga_opt_run", "(", "cls", ",", "vasp_cmd", ",", "auto_npar", "=", "True", ",", "ediffg", "=", "-", "0.05", ",", "half_kpts_first_relax", "=", "False", ",", "auto_continue", "=", "False", ")", ":", "incar", "=", "Incar", ".", "from_file", "(", ...
Returns a list of three jobs to perform an optimization for any metaGGA functional. There is an initial calculation of the GGA wavefunction which is fed into the initial metaGGA optimization to precondition the electronic structure optimizer. The metaGGA optimization is performed using the double relaxation scheme
[ "Returns", "a", "list", "of", "three", "jobs", "to", "perform", "an", "optimization", "for", "any", "metaGGA", "functional", ".", "There", "is", "an", "initial", "calculation", "of", "the", "GGA", "wavefunction", "which", "is", "fed", "into", "the", "initial...
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L301-L343
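The same driver pattern for the metaGGA workflow: one GGA preconditioning job plus the two SCAN relaxations. A sketch with an illustrative command; it assumes an INCAR is present in the working directory, since the method reads it to recover METAGGA.

```python
from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob

jobs = VaspJob.metagga_opt_run(["mpirun", "vasp_std"])
Custodian([], jobs).run()  # .precondition, .relax1, .relax2 outputs
```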
12,245
materialsproject/custodian
custodian/vasp/jobs.py
VaspJob.full_opt_run
def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02, max_steps=10, ediffg=-0.05, half_kpts_first_relax=False, **vasp_job_kwargs): """ Returns a generator of jobs for a full optimization run. Basically, this runs an infinite series of geometry optimization jobs until the % vol change in a particular optimization is less than vol_change_tol. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] vol_change_tol (float): The tolerance at which to stop a run. Defaults to 0.05, i.e., 5%. max_steps (int): The maximum number of runs. Defaults to 10 ( highly unlikely that this limit is ever reached). ediffg (float): Force convergence criteria for subsequent runs ( ignored for the initial run.) half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. \*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See :class:`custodian.vasp.jobs.VaspJob`. Returns: Generator of jobs. """ for i in range(max_steps): if i == 0: settings = None backup = True if half_kpts_first_relax and os.path.exists("KPOINTS") and \ os.path.exists("POSCAR"): kpts = Kpoints.from_file("KPOINTS") orig_kpts_dict = kpts.as_dict() kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist() low_kpts_dict = kpts.as_dict() settings = [ {"dict": "KPOINTS", "action": {"_set": low_kpts_dict}} ] else: backup = False initial = Poscar.from_file("POSCAR").structure final = Poscar.from_file("CONTCAR").structure vol_change = (final.volume - initial.volume) / initial.volume logger.info("Vol change = %.1f %%!" % (vol_change * 100)) if abs(vol_change) < vol_change_tol: logger.info("Stopping optimization!") break else: incar_update = {"ISTART": 1} if ediffg: incar_update["EDIFFG"] = ediffg settings = [ {"dict": "INCAR", "action": {"_set": incar_update}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if i == 1 and half_kpts_first_relax: settings.append({"dict": "KPOINTS", "action": {"_set": orig_kpts_dict}}) logger.info("Generating job = %d!" % (i+1)) yield VaspJob(vasp_cmd, final=False, backup=backup, suffix=".relax%d" % (i+1), settings_override=settings, **vasp_job_kwargs)
python
def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02, max_steps=10, ediffg=-0.05, half_kpts_first_relax=False, **vasp_job_kwargs): for i in range(max_steps): if i == 0: settings = None backup = True if half_kpts_first_relax and os.path.exists("KPOINTS") and \ os.path.exists("POSCAR"): kpts = Kpoints.from_file("KPOINTS") orig_kpts_dict = kpts.as_dict() kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist() low_kpts_dict = kpts.as_dict() settings = [ {"dict": "KPOINTS", "action": {"_set": low_kpts_dict}} ] else: backup = False initial = Poscar.from_file("POSCAR").structure final = Poscar.from_file("CONTCAR").structure vol_change = (final.volume - initial.volume) / initial.volume logger.info("Vol change = %.1f %%!" % (vol_change * 100)) if abs(vol_change) < vol_change_tol: logger.info("Stopping optimization!") break else: incar_update = {"ISTART": 1} if ediffg: incar_update["EDIFFG"] = ediffg settings = [ {"dict": "INCAR", "action": {"_set": incar_update}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if i == 1 and half_kpts_first_relax: settings.append({"dict": "KPOINTS", "action": {"_set": orig_kpts_dict}}) logger.info("Generating job = %d!" % (i+1)) yield VaspJob(vasp_cmd, final=False, backup=backup, suffix=".relax%d" % (i+1), settings_override=settings, **vasp_job_kwargs)
[ "def", "full_opt_run", "(", "cls", ",", "vasp_cmd", ",", "vol_change_tol", "=", "0.02", ",", "max_steps", "=", "10", ",", "ediffg", "=", "-", "0.05", ",", "half_kpts_first_relax", "=", "False", ",", "*", "*", "vasp_job_kwargs", ")", ":", "for", "i", "in"...
Returns a generator of jobs for a full optimization run. Basically, this runs an infinite series of geometry optimization jobs until the % vol change in a particular optimization is less than vol_change_tol. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] vol_change_tol (float): The tolerance at which to stop a run. Defaults to 0.02, i.e., 2%. max_steps (int): The maximum number of runs. Defaults to 10 (highly unlikely that this limit is ever reached). ediffg (float): Force convergence criteria for subsequent runs (ignored for the initial run). half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. \*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See :class:`custodian.vasp.jobs.VaspJob`. Returns: Generator of jobs.
[ "Returns", "a", "generator", "of", "jobs", "for", "a", "full", "optimization", "run", ".", "Basically", "this", "runs", "an", "infinite", "series", "of", "geometry", "optimization", "jobs", "until", "the", "%", "vol", "change", "in", "a", "particular", "opti...
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L346-L412
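Because `full_opt_run` is a generator, Custodian pulls jobs lazily, so the volume-convergence `break` ends the run early. A sketch with an illustrative command and no handlers.

```python
from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob

job_gen = VaspJob.full_opt_run(["mpirun", "vasp_std"], vol_change_tol=0.02)
Custodian([], job_gen, max_errors=10).run()
# stops once a relaxation changes the cell volume by less than 2%
```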
12,246
materialsproject/custodian
custodian/vasp/jobs.py
VaspNEBJob.setup
def setup(self): """ Performs initial setup for VaspNEBJob, including overriding any settings and backing up. """ neb_dirs = self.neb_dirs if self.backup: # Back up KPOINTS, INCAR, POTCAR for f in VASP_NEB_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) # Back up POSCARs for path in neb_dirs: poscar = os.path.join(path, "POSCAR") shutil.copy(poscar, "{}.orig".format(poscar)) if self.half_kpts and os.path.exists("KPOINTS"): kpts = Kpoints.from_file("KPOINTS") kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1) kpts.kpts = kpts.kpts.astype(int).tolist() if tuple(kpts.kpts[0]) == (1, 1, 1): kpt_dic = kpts.as_dict() kpt_dic["generation_style"] = 'Gamma' kpts = Kpoints.from_dict(kpt_dic) kpts.write_file("KPOINTS") if self.auto_npar: try: incar = Incar.from_file("INCAR") import multiprocessing # Try sge environment variable first # (since multiprocessing counts cores on the current # machine only) ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count() ncores = int(ncores) for npar in range(int(math.sqrt(ncores)), ncores): if ncores % npar == 0: incar["NPAR"] = npar break incar.write_file("INCAR") except: pass if self.auto_continue and \ os.path.exists("STOPCAR") and \ not os.access("STOPCAR", os.W_OK): # Remove STOPCAR os.chmod("STOPCAR", 0o644) os.remove("STOPCAR") # Copy CONTCAR to POSCAR for path in self.neb_sub: contcar = os.path.join(path, "CONTCAR") poscar = os.path.join(path, "POSCAR") shutil.copy(contcar, poscar) if self.settings_override is not None: VaspModder().apply_actions(self.settings_override)
python
def setup(self): neb_dirs = self.neb_dirs if self.backup: # Back up KPOINTS, INCAR, POTCAR for f in VASP_NEB_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) # Back up POSCARs for path in neb_dirs: poscar = os.path.join(path, "POSCAR") shutil.copy(poscar, "{}.orig".format(poscar)) if self.half_kpts and os.path.exists("KPOINTS"): kpts = Kpoints.from_file("KPOINTS") kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1) kpts.kpts = kpts.kpts.astype(int).tolist() if tuple(kpts.kpts[0]) == (1, 1, 1): kpt_dic = kpts.as_dict() kpt_dic["generation_style"] = 'Gamma' kpts = Kpoints.from_dict(kpt_dic) kpts.write_file("KPOINTS") if self.auto_npar: try: incar = Incar.from_file("INCAR") import multiprocessing # Try sge environment variable first # (since multiprocessing counts cores on the current # machine only) ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count() ncores = int(ncores) for npar in range(int(math.sqrt(ncores)), ncores): if ncores % npar == 0: incar["NPAR"] = npar break incar.write_file("INCAR") except: pass if self.auto_continue and \ os.path.exists("STOPCAR") and \ not os.access("STOPCAR", os.W_OK): # Remove STOPCAR os.chmod("STOPCAR", 0o644) os.remove("STOPCAR") # Copy CONTCAR to POSCAR for path in self.neb_sub: contcar = os.path.join(path, "CONTCAR") poscar = os.path.join(path, "POSCAR") shutil.copy(contcar, poscar) if self.settings_override is not None: VaspModder().apply_actions(self.settings_override)
[ "def", "setup", "(", "self", ")", ":", "neb_dirs", "=", "self", ".", "neb_dirs", "if", "self", ".", "backup", ":", "# Back up KPOINTS, INCAR, POTCAR", "for", "f", "in", "VASP_NEB_INPUT_FILES", ":", "shutil", ".", "copy", "(", "f", ",", "\"{}.orig\"", ".", ...
Performs initial setup for VaspNEBJob, including overriding any settings and backing up.
[ "Performs", "initial", "setup", "for", "VaspNEBJob", "including", "overriding", "any", "settings", "and", "backing", "up", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L686-L744
12,247
materialsproject/custodian
custodian/vasp/jobs.py
VaspNEBJob.postprocess
def postprocess(self): """ Postprocessing includes renaming and gzipping where necessary. """ # Add suffix to all sub_dir/{items} for path in self.neb_dirs: for f in VASP_NEB_OUTPUT_SUB_FILES: f = os.path.join(path, f) if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) # Add suffix to all output files for f in VASP_NEB_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix))
python
def postprocess(self): # Add suffix to all sub_dir/{items} for path in self.neb_dirs: for f in VASP_NEB_OUTPUT_SUB_FILES: f = os.path.join(path, f) if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) # Add suffix to all output files for f in VASP_NEB_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix))
[ "def", "postprocess", "(", "self", ")", ":", "# Add suffix to all sub_dir/{items}", "for", "path", "in", "self", ".", "neb_dirs", ":", "for", "f", "in", "VASP_NEB_OUTPUT_SUB_FILES", ":", "f", "=", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")",...
Postprocessing includes renaming and gzipping where necessary.
[ "Postprocessing", "includes", "renaming", "and", "gzipping", "where", "necessary", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L771-L791
12,248
materialsproject/custodian
custodian/nwchem/jobs.py
NwchemJob.setup
def setup(self): """ Performs backup if necessary. """ if self.backup: shutil.copy(self.input_file, "{}.orig".format(self.input_file))
python
def setup(self): if self.backup: shutil.copy(self.input_file, "{}.orig".format(self.input_file))
[ "def", "setup", "(", "self", ")", ":", "if", "self", ".", "backup", ":", "shutil", ".", "copy", "(", "self", ".", "input_file", ",", "\"{}.orig\"", ".", "format", "(", "self", ".", "input_file", ")", ")" ]
Performs backup if necessary.
[ "Performs", "backup", "if", "necessary", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/nwchem/jobs.py#L58-L63
12,249
materialsproject/custodian
custodian/nwchem/jobs.py
NwchemJob.run
def run(self): """ Performs actual nwchem run. """ with zopen(self.output_file, 'w') as fout: return subprocess.Popen(self.nwchem_cmd + [self.input_file], stdout=fout)
python
def run(self): with zopen(self.output_file, 'w') as fout: return subprocess.Popen(self.nwchem_cmd + [self.input_file], stdout=fout)
[ "def", "run", "(", "self", ")", ":", "with", "zopen", "(", "self", ".", "output_file", ",", "'w'", ")", "as", "fout", ":", "return", "subprocess", ".", "Popen", "(", "self", ".", "nwchem_cmd", "+", "[", "self", ".", "input_file", "]", ",", "stdout", ...
Performs actual nwchem run.
[ "Performs", "actual", "nwchem", "run", "." ]
b33b01574fc899f959acb3c495398fd3d0fc41d0
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/nwchem/jobs.py#L65-L71
12,250
wdecoster/nanofilt
nanofilt/NanoFilt.py
valid_GC
def valid_GC(x): """type function for argparse to check GC values. Check if the supplied value for minGC and maxGC is a valid input, being between 0 and 1 """ x = float(x) if x < 0.0 or x > 1.0: raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x)) return x
python
def valid_GC(x): x = float(x) if x < 0.0 or x > 1.0: raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x)) return x
[ "def", "valid_GC", "(", "x", ")", ":", "x", "=", "float", "(", "x", ")", "if", "x", "<", "0.0", "or", "x", ">", "1.0", ":", "raise", "ArgumentTypeError", "(", "\"{} not in range [0.0, 1.0]\"", ".", "format", "(", "x", ")", ")", "return", "x" ]
type function for argparse to check GC values. Check if the supplied value for minGC and maxGC is a valid input, being between 0 and 1
[ "type", "function", "for", "argparse", "to", "check", "GC", "values", "." ]
513bdc529317bebbd743c0dff799472f35d92f45
https://github.com/wdecoster/nanofilt/blob/513bdc529317bebbd743c0dff799472f35d92f45/nanofilt/NanoFilt.py#L157-L165
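`valid_GC` is written as an argparse `type` callable, so bad values are rejected at parse time; a sketch assuming the function above is in scope.

```python
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--minGC", type=valid_GC, default=0.0)
parser.add_argument("--maxGC", type=valid_GC, default=1.0)

args = parser.parse_args(["--minGC", "0.2", "--maxGC", "0.8"])
print(args.minGC, args.maxGC)  # 0.2 0.8
# parser.parse_args(["--minGC", "1.5"]) exits with "not in range [0.0, 1.0]"
```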
12,251
wdecoster/nanofilt
nanofilt/NanoFilt.py
filter_stream
def filter_stream(fq, args): """Filter a fastq file on stdin. Print fastq record to stdout if it passes - quality filter (optional) - length filter (optional) - min/maxGC filter (optional) Optionally trim a number of nucleotides from beginning and end. Record has to be longer than args.length (default 1) after trimming Use a faster silent quality_check if no filtering on quality is required """ if args.quality: quality_check = ave_qual else: quality_check = silent_quality_check minlen = args.length + int(args.headcrop or 0) - (int(args.tailcrop or 0)) for rec in SeqIO.parse(fq, "fastq"): if args.GC_filter: gc = (rec.seq.upper().count("C") + rec.seq.upper().count("G")) / len(rec) else: gc = 0.50 # dummy variable if quality_check(rec.letter_annotations["phred_quality"]) > args.quality \ and minlen <= len(rec) <= args.maxlength \ and args.minGC <= gc <= args.maxGC: print(rec[args.headcrop:args.tailcrop].format("fastq"), end="")
python
def filter_stream(fq, args): if args.quality: quality_check = ave_qual else: quality_check = silent_quality_check minlen = args.length + int(args.headcrop or 0) - (int(args.tailcrop or 0)) for rec in SeqIO.parse(fq, "fastq"): if args.GC_filter: gc = (rec.seq.upper().count("C") + rec.seq.upper().count("G")) / len(rec) else: gc = 0.50 # dummy variable if quality_check(rec.letter_annotations["phred_quality"]) > args.quality \ and minlen <= len(rec) <= args.maxlength \ and args.minGC <= gc <= args.maxGC: print(rec[args.headcrop:args.tailcrop].format("fastq"), end="")
[ "def", "filter_stream", "(", "fq", ",", "args", ")", ":", "if", "args", ".", "quality", ":", "quality_check", "=", "ave_qual", "else", ":", "quality_check", "=", "silent_quality_check", "minlen", "=", "args", ".", "length", "+", "int", "(", "args", ".", ...
Filter a fastq file on stdin. Print fastq record to stdout if it passes - quality filter (optional) - length filter (optional) - min/maxGC filter (optional) Optionally trim a number of nucleotides from beginning and end. Record has to be longer than args.length (default 1) after trimming Use a faster silent quality_check if no filtering on quality is required
[ "Filter", "a", "fastq", "file", "on", "stdin", "." ]
513bdc529317bebbd743c0dff799472f35d92f45
https://github.com/wdecoster/nanofilt/blob/513bdc529317bebbd743c0dff799472f35d92f45/nanofilt/NanoFilt.py#L173-L197
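The streaming entry point is meant for pipelines like `gunzip -c reads.fastq.gz | NanoFilt -q 10 -l 500 > filtered.fastq`. The sketch below fakes the argparse namespace with the fields `filter_stream` reads, assuming the function and its module-level helpers (`ave_qual`, `silent_quality_check`, Biopython's `SeqIO`) are in scope.

```python
import sys
from argparse import Namespace

# Field values are illustrative; None for headcrop/tailcrop means no trimming.
args = Namespace(quality=10, length=500, maxlength=1_000_000,
                 headcrop=None, tailcrop=None,
                 GC_filter=False, minGC=0.0, maxGC=1.0)

filter_stream(sys.stdin, args)  # fastq in on stdin, filtered fastq on stdout
```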
12,252
wdecoster/nanofilt
nanofilt/NanoFilt.py
filter_using_summary
def filter_using_summary(fq, args): """Use quality scores from albacore summary file for filtering Use the summary file from albacore for more accurate quality estimate Get the dataframe from nanoget, convert to dictionary """ data = {entry[0]: entry[1] for entry in process_summary( summaryfile=args.summary, threads="NA", readtype=args.readtype, barcoded=False)[ ["readIDs", "quals"]].itertuples(index=False)} try: for record in SeqIO.parse(fq, "fastq"): if data[record.id] > args.quality \ and args.length <= len(record) <= args.maxlength: print(record[args.headcrop:args.tailcrop].format("fastq"), end="") except KeyError: logging.error("mismatch between summary and fastq: \ {} was not found in the summary file.".format(record.id)) sys.exit('\nERROR: mismatch between sequencing_summary and fastq file: \ {} was not found in the summary file.\nQuitting.'.format(record.id))
python
def filter_using_summary(fq, args): data = {entry[0]: entry[1] for entry in process_summary( summaryfile=args.summary, threads="NA", readtype=args.readtype, barcoded=False)[ ["readIDs", "quals"]].itertuples(index=False)} try: for record in SeqIO.parse(fq, "fastq"): if data[record.id] > args.quality \ and args.length <= len(record) <= args.maxlength: print(record[args.headcrop:args.tailcrop].format("fastq"), end="") except KeyError: logging.error("mismatch between summary and fastq: \ {} was not found in the summary file.".format(record.id)) sys.exit('\nERROR: mismatch between sequencing_summary and fastq file: \ {} was not found in the summary file.\nQuitting.'.format(record.id))
[ "def", "filter_using_summary", "(", "fq", ",", "args", ")", ":", "data", "=", "{", "entry", "[", "0", "]", ":", "entry", "[", "1", "]", "for", "entry", "in", "process_summary", "(", "summaryfile", "=", "args", ".", "summary", ",", "threads", "=", "\"...
Use quality scores from albacore summary file for filtering Use the summary file from albacore for more accurate quality estimate Get the dataframe from nanoget, convert to dictionary
[ "Use", "quality", "scores", "from", "albacore", "summary", "file", "for", "filtering" ]
513bdc529317bebbd743c0dff799472f35d92f45
https://github.com/wdecoster/nanofilt/blob/513bdc529317bebbd743c0dff799472f35d92f45/nanofilt/NanoFilt.py#L200-L221
12,253
milesrichardson/ParsePy
parse_rest/connection.py
master_key_required
def master_key_required(func): '''decorator describing methods that require the master key''' def ret(obj, *args, **kw): conn = ACCESS_KEYS if not (conn and conn.get('master_key')): message = '%s requires the master key' % func.__name__ raise core.ParseError(message) func(obj, *args, **kw) return ret
python
def master_key_required(func): '''decorator describing methods that require the master key''' def ret(obj, *args, **kw): conn = ACCESS_KEYS if not (conn and conn.get('master_key')): message = '%s requires the master key' % func.__name__ raise core.ParseError(message) func(obj, *args, **kw) return ret
[ "def", "master_key_required", "(", "func", ")", ":", "def", "ret", "(", "obj", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "conn", "=", "ACCESS_KEYS", "if", "not", "(", "conn", "and", "conn", ".", "get", "(", "'master_key'", ")", ")", ":", "...
decorator describing methods that require the master key
[ "decorator", "describing", "methods", "that", "require", "the", "master", "key" ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/connection.py#L66-L74
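A hedged sketch of how this decorator is meant to be used, based only on the record above; the Admin class and wipe method are hypothetical, and core.ParseError is raised when no master key was registered.
from parse_rest.connection import master_key_required
class Admin(object):  # hypothetical class, not part of ParsePy
    @master_key_required
    def wipe(self):
        # only reached when ACCESS_KEYS contains a 'master_key'
        print('privileged operation')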
12,254
milesrichardson/ParsePy
parse_rest/connection.py
ParseBase.execute
def execute(cls, uri, http_verb, extra_headers=None, batch=False, _body=None, **kw): """ if batch == False, execute a command with the given parameters and return the response JSON. If batch == True, return the dictionary that would be used in a batch command. """ if batch: urlsplitter = urlparse(API_ROOT).netloc ret = {"method": http_verb, "path": uri.split(urlsplitter, 1)[1]} if kw: ret["body"] = kw return ret if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS): raise core.ParseError('Missing connection credentials') app_id = ACCESS_KEYS.get('app_id') rest_key = ACCESS_KEYS.get('rest_key') master_key = ACCESS_KEYS.get('master_key') url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri if _body is None: data = kw and json.dumps(kw, default=date_handler) or "{}" else: data = _body if http_verb == 'GET' and data: url += '?%s' % urlencode(kw) data = None else: if cls.__name__ == 'File': data = data else: data = data.encode('utf-8') headers = { 'Content-type': 'application/json', 'X-Parse-Application-Id': app_id, 'X-Parse-REST-API-Key': rest_key } headers.update(extra_headers or {}) if cls.__name__ == 'File': request = Request(url.encode('utf-8'), data, headers) else: request = Request(url, data, headers) if ACCESS_KEYS.get('session_token'): request.add_header('X-Parse-Session-Token', ACCESS_KEYS.get('session_token')) elif master_key: request.add_header('X-Parse-Master-Key', master_key) request.get_method = lambda: http_verb try: response = urlopen(request, timeout=CONNECTION_TIMEOUT) except HTTPError as e: exc = { 400: core.ResourceRequestBadRequest, 401: core.ResourceRequestLoginRequired, 403: core.ResourceRequestForbidden, 404: core.ResourceRequestNotFound }.get(e.code, core.ParseError) raise exc(e.read()) return json.loads(response.read().decode('utf-8'))
python
def execute(cls, uri, http_verb, extra_headers=None, batch=False, _body=None, **kw): if batch: urlsplitter = urlparse(API_ROOT).netloc ret = {"method": http_verb, "path": uri.split(urlsplitter, 1)[1]} if kw: ret["body"] = kw return ret if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS): raise core.ParseError('Missing connection credentials') app_id = ACCESS_KEYS.get('app_id') rest_key = ACCESS_KEYS.get('rest_key') master_key = ACCESS_KEYS.get('master_key') url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri if _body is None: data = kw and json.dumps(kw, default=date_handler) or "{}" else: data = _body if http_verb == 'GET' and data: url += '?%s' % urlencode(kw) data = None else: if cls.__name__ == 'File': data = data else: data = data.encode('utf-8') headers = { 'Content-type': 'application/json', 'X-Parse-Application-Id': app_id, 'X-Parse-REST-API-Key': rest_key } headers.update(extra_headers or {}) if cls.__name__ == 'File': request = Request(url.encode('utf-8'), data, headers) else: request = Request(url, data, headers) if ACCESS_KEYS.get('session_token'): request.add_header('X-Parse-Session-Token', ACCESS_KEYS.get('session_token')) elif master_key: request.add_header('X-Parse-Master-Key', master_key) request.get_method = lambda: http_verb try: response = urlopen(request, timeout=CONNECTION_TIMEOUT) except HTTPError as e: exc = { 400: core.ResourceRequestBadRequest, 401: core.ResourceRequestLoginRequired, 403: core.ResourceRequestForbidden, 404: core.ResourceRequestNotFound }.get(e.code, core.ParseError) raise exc(e.read()) return json.loads(response.read().decode('utf-8'))
[ "def", "execute", "(", "cls", ",", "uri", ",", "http_verb", ",", "extra_headers", "=", "None", ",", "batch", "=", "False", ",", "_body", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "batch", ":", "urlsplitter", "=", "urlparse", "(", "API_ROOT", ...
if batch == False, execute a command with the given parameters and return the response JSON. If batch == True, return the dictionary that would be used in a batch command.
[ "if", "batch", "==", "False", "execute", "a", "command", "with", "the", "given", "parameters", "and", "return", "the", "response", "JSON", ".", "If", "batch", "==", "True", "return", "the", "dictionary", "that", "would", "be", "used", "in", "a", "batch", ...
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/connection.py#L85-L150
12,255
milesrichardson/ParsePy
parse_rest/connection.py
ParseBatcher.batch
def batch(self, methods): """ Given a list of create, update or delete methods to call, call all of them in a single batch operation. """ methods = list(methods) # methods can be an iterator if not methods: # also accepts an empty list (or generator), so batch can be called directly with a (possibly empty) query result return queries, callbacks = list(zip(*[m(batch=True) for m in methods])) # perform all the operations in one batch responses = self.execute("", "POST", requests=queries) # perform the callbacks with the response data (updating the existing # objects, etc) batched_errors = [] for callback, response in zip(callbacks, responses): if "success" in response: callback(response["success"]) else: batched_errors.append(response["error"]) if batched_errors: raise core.ParseBatchError(batched_errors)
python
def batch(self, methods): methods = list(methods) # methods can be an iterator if not methods: # also accepts an empty list (or generator), so batch can be called directly with a (possibly empty) query result return queries, callbacks = list(zip(*[m(batch=True) for m in methods])) # perform all the operations in one batch responses = self.execute("", "POST", requests=queries) # perform the callbacks with the response data (updating the existing # objects, etc) batched_errors = [] for callback, response in zip(callbacks, responses): if "success" in response: callback(response["success"]) else: batched_errors.append(response["error"]) if batched_errors: raise core.ParseBatchError(batched_errors)
[ "def", "batch", "(", "self", ",", "methods", ")", ":", "methods", "=", "list", "(", "methods", ")", "# methods can be iterator", "if", "not", "methods", ":", "#accepts also empty list (or generator) - it allows call batch directly with query result (eventually empty)", "retur...
Given a list of create, update or delete methods to call, call all of them in a single batch operation.
[ "Given", "a", "list", "of", "create", "update", "or", "delete", "methods", "to", "call", "call", "all", "of", "them", "in", "a", "single", "batch", "operation", "." ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/connection.py#L178-L201
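A usage sketch assuming ParseBatcher is exported from parse_rest.connection (its path in this record) and that Object subclasses expose a bound save method accepting batch=True; GameScore is a hypothetical subclass.
from parse_rest.connection import ParseBatcher
from parse_rest.datatypes import Object
class GameScore(Object):  # hypothetical Parse class
    pass
scores = [GameScore(score=s) for s in (10, 20, 30)]
# batch() calls each bound method with batch=True, sends one POST,
# then runs the per-object callbacks with the response data
ParseBatcher().batch(o.save for o in scores)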
12,256
milesrichardson/ParsePy
parse_rest/installation.py
Installation.update_channels
def update_channels(cls, installation_id, channels_to_add=set(), channels_to_remove=set(), **kw): """ Allow an application to manually subscribe or unsubscribe an installation to a certain push channel in a unified operation. this is based on: https://www.parse.com/docs/rest#installations-updating installation_id: the installation id you'd like to add a channel to channels_to_add: the name of the channel you'd like to subscribe the user to channels_to_remove: the name of the channel you'd like to unsubscribe the user from """ installation_url = cls._get_installation_url(installation_id) current_config = cls.GET(installation_url) new_channels = list(set(current_config['channels']).union(channels_to_add).difference(channels_to_remove)) cls.PUT(installation_url, channels=new_channels)
python
def update_channels(cls, installation_id, channels_to_add=set(), channels_to_remove=set(), **kw): installation_url = cls._get_installation_url(installation_id) current_config = cls.GET(installation_url) new_channels = list(set(current_config['channels']).union(channels_to_add).difference(channels_to_remove)) cls.PUT(installation_url, channels=new_channels)
[ "def", "update_channels", "(", "cls", ",", "installation_id", ",", "channels_to_add", "=", "set", "(", ")", ",", "channels_to_remove", "=", "set", "(", ")", ",", "*", "*", "kw", ")", ":", "installation_url", "=", "cls", ".", "_get_installation_url", "(", "...
Allow an application to manually subscribe or unsubscribe an installation to a certain push channel in a unified operation. this is based on: https://www.parse.com/docs/rest#installations-updating installation_id: the installation id you'd like to add a channel to channels_to_add: the name of the channel you'd like to subscribe the user to channels_to_remove: the name of the channel you'd like to unsubscribe the user from
[ "Allow", "an", "application", "to", "manually", "subscribe", "or", "unsubscribe", "an", "installation", "to", "a", "certain", "push", "channel", "in", "a", "unified", "operation", "." ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/installation.py#L30-L49
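A sketch of the unified subscribe/unsubscribe call described above; the installation id and channel names are placeholders.
from parse_rest.installation import Installation
Installation.update_channels('abc123installation',  # placeholder id
                             channels_to_add={'news'},
                             channels_to_remove={'beta'})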
12,257
milesrichardson/ParsePy
parse_rest/query.py
Queryset._fetch
def _fetch(self, count=False): if self._result_cache is not None: return len(self._result_cache) if count else self._result_cache """ Return a list of objects matching query, or if count == True return only the number of objects matching. """ options = dict(self._options) # make a local copy if self._where: # JSON encode WHERE values options['where'] = json.dumps(self._where) if self._select_related: options['include'] = ','.join(self._select_related) if count: return self._manager._count(**options) self._result_cache = self._manager._fetch(**options) return self._result_cache
python
def _fetch(self, count=False): if self._result_cache is not None: return len(self._result_cache) if count else self._result_cache options = dict(self._options) # make a local copy if self._where: # JSON encode WHERE values options['where'] = json.dumps(self._where) if self._select_related: options['include'] = ','.join(self._select_related) if count: return self._manager._count(**options) self._result_cache = self._manager._fetch(**options) return self._result_cache
[ "def", "_fetch", "(", "self", ",", "count", "=", "False", ")", ":", "if", "self", ".", "_result_cache", "is", "not", "None", ":", "return", "len", "(", "self", ".", "_result_cache", ")", "if", "count", "else", "self", ".", "_result_cache", "options", "...
Return a list of objects matching query, or if count == True return only the number of objects matching.
[ "Return", "a", "list", "of", "objects", "matching", "query", "or", "if", "count", "==", "True", "return", "only", "the", "number", "of", "objects", "matching", "." ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/query.py#L111-L128
12,258
milesrichardson/ParsePy
parse_rest/datatypes.py
complex_type
def complex_type(name=None): '''Decorator for registering complex types''' def wrapped(cls): ParseType.type_mapping[name or cls.__name__] = cls return cls return wrapped
python
def complex_type(name=None): '''Decorator for registering complex types''' def wrapped(cls): ParseType.type_mapping[name or cls.__name__] = cls return cls return wrapped
[ "def", "complex_type", "(", "name", "=", "None", ")", ":", "def", "wrapped", "(", "cls", ")", ":", "ParseType", ".", "type_mapping", "[", "name", "or", "cls", ".", "__name__", "]", "=", "cls", "return", "cls", "return", "wrapped" ]
Decorator for registering complex types
[ "Decorator", "for", "registering", "complex", "types" ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/datatypes.py#L25-L30
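A small sketch of the registration the decorator performs; Box and the 'Box' type name are hypothetical, and the ParseType import is assumed from the code's own reference to ParseType.type_mapping.
from parse_rest.datatypes import complex_type, ParseType
@complex_type('Box')  # hypothetical Parse __type name
class Box(object):
    pass
assert ParseType.type_mapping['Box'] is Box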
12,259
milesrichardson/ParsePy
parse_rest/datatypes.py
Object.schema
def schema(cls): """Retrieves the class' schema.""" root = '/'.join([API_ROOT, 'schemas', cls.__name__]) schema = cls.GET(root) return schema
python
def schema(cls): root = '/'.join([API_ROOT, 'schemas', cls.__name__]) schema = cls.GET(root) return schema
[ "def", "schema", "(", "cls", ")", ":", "root", "=", "'/'", ".", "join", "(", "[", "API_ROOT", ",", "'schemas'", ",", "cls", ".", "__name__", "]", ")", "schema", "=", "cls", ".", "GET", "(", "root", ")", "return", "schema" ]
Retrieves the class' schema.
[ "Retrieves", "the", "class", "schema", "." ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/datatypes.py#L568-L572
12,260
milesrichardson/ParsePy
parse_rest/datatypes.py
Object.schema_delete_field
def schema_delete_field(cls, key): """Deletes a field.""" root = '/'.join([API_ROOT, 'schemas', cls.__name__]) payload = { 'className': cls.__name__, 'fields': { key: { '__op': 'Delete' } } } cls.PUT(root, **payload)
python
def schema_delete_field(cls, key): root = '/'.join([API_ROOT, 'schemas', cls.__name__]) payload = { 'className': cls.__name__, 'fields': { key: { '__op': 'Delete' } } } cls.PUT(root, **payload)
[ "def", "schema_delete_field", "(", "cls", ",", "key", ")", ":", "root", "=", "'/'", ".", "join", "(", "[", "API_ROOT", ",", "'schemas'", ",", "cls", ".", "__name__", "]", ")", "payload", "=", "{", "'className'", ":", "cls", ".", "__name__", ",", "'fi...
Deletes a field.
[ "Deletes", "a", "field", "." ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/datatypes.py#L575-L586
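A sketch combining the two schema classmethods above; GameScore and the 'obsolete' field are hypothetical, and both calls hit the live /schemas endpoint.
from parse_rest.datatypes import Object
class GameScore(Object):  # hypothetical subclass; the class name maps to a Parse class
    pass
print(GameScore.schema())                  # GET /schemas/GameScore
GameScore.schema_delete_field('obsolete')  # PUT with an __op: Delete payload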
12,261
milesrichardson/ParsePy
parse_rest/user.py
login_required
def login_required(func): '''decorator describing User methods that need to be logged in''' def ret(obj, *args, **kw): if not hasattr(obj, 'sessionToken'): message = '%s requires a logged-in session' % func.__name__ raise ResourceRequestLoginRequired(message) return func(obj, *args, **kw) return ret
python
def login_required(func): '''decorator describing User methods that need to be logged in''' def ret(obj, *args, **kw): if not hasattr(obj, 'sessionToken'): message = '%s requires a logged-in session' % func.__name__ raise ResourceRequestLoginRequired(message) return func(obj, *args, **kw) return ret
[ "def", "login_required", "(", "func", ")", ":", "def", "ret", "(", "obj", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'sessionToken'", ")", ":", "message", "=", "'%s requires a logged-in session'", "%", "f...
decorator describing User methods that need to be logged in
[ "decorator", "describing", "User", "methods", "that", "need", "to", "be", "logged", "in" ]
7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea
https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/user.py#L21-L28
12,262
clld/clldutils
src/clldutils/jsonlib.py
parse
def parse(d): """Convert iso formatted timestamps found as values in the dict d to datetime objects. :return: A shallow copy of d with converted timestamps. """ res = {} for k, v in iteritems(d): if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v): v = dateutil.parser.parse(v) res[k] = v return res
python
def parse(d): res = {} for k, v in iteritems(d): if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v): v = dateutil.parser.parse(v) res[k] = v return res
[ "def", "parse", "(", "d", ")", ":", "res", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "d", ")", ":", "if", "isinstance", "(", "v", ",", "string_types", ")", "and", "DATETIME_ISO_FORMAT", ".", "match", "(", "v", ")", ":", "v", ...
Convert iso formatted timestamps found as values in the dict d to datetime objects. :return: A shallow copy of d with converted timestamps.
[ "Convert", "iso", "formatted", "timestamps", "found", "as", "values", "in", "the", "dict", "d", "to", "datetime", "objects", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/jsonlib.py#L18-L28
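A usage sketch, assuming the timestamp below matches the module's DATETIME_ISO_FORMAT regex.
from clldutils.jsonlib import parse
d = parse({'created': '2012-12-27T05:06:27', 'name': 'x'})
# d['created'] is now a datetime.datetime; non-matching values are left alone
print(type(d['created']), d['name'])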
12,263
clld/clldutils
src/clldutils/jsonlib.py
dump
def dump(obj, path, **kw): """Python 2 + 3 compatible version of json.dump. :param obj: The object to be dumped. :param path: The path of the JSON file to be written. :param kw: Keyword parameters are passed to json.dump """ open_kw = {'mode': 'w'} if PY3: # pragma: no cover open_kw['encoding'] = 'utf-8' # avoid indented lines ending with ", " on PY2 if kw.get('indent') and kw.get('separators') is None: kw['separators'] = (',', ': ') with open(str(path), **open_kw) as fp: return json.dump(obj, fp, **kw)
python
def dump(obj, path, **kw): open_kw = {'mode': 'w'} if PY3: # pragma: no cover open_kw['encoding'] = 'utf-8' # avoid indented lines ending with ", " on PY2 if kw.get('indent') and kw.get('separators') is None: kw['separators'] = (',', ': ') with open(str(path), **open_kw) as fp: return json.dump(obj, fp, **kw)
[ "def", "dump", "(", "obj", ",", "path", ",", "*", "*", "kw", ")", ":", "open_kw", "=", "{", "'mode'", ":", "'w'", "}", "if", "PY3", ":", "# pragma: no cover", "open_kw", "[", "'encoding'", "]", "=", "'utf-8'", "# avoid indented lines ending with \", \" on PY...
Python 2 + 3 compatible version of json.dump. :param obj: The object to be dumped. :param path: The path of the JSON file to be written. :param kw: Keyword parameters are passed to json.dump
[ "Python", "2", "+", "3", "compatible", "version", "of", "json", ".", "dump", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/jsonlib.py#L37-L53
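A usage sketch; keyword arguments such as indent are forwarded to json.dump.
from clldutils.jsonlib import dump
dump({'a': 1, 'b': [2, 3]}, 'out.json', indent=2)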
12,264
clld/clldutils
src/clldutils/text.py
strip_brackets
def strip_brackets(text, brackets=None): """Strip brackets and what is inside brackets from text. .. note:: If the text contains only one opening bracket, the rest of the text will be ignored. This is a feature, not a bug, as we want to avoid having this function raise errors too easily. """ res = [] for c, type_ in _tokens(text, brackets=brackets): if type_ == TextType.text: res.append(c) return ''.join(res).strip()
python
def strip_brackets(text, brackets=None): res = [] for c, type_ in _tokens(text, brackets=brackets): if type_ == TextType.text: res.append(c) return ''.join(res).strip()
[ "def", "strip_brackets", "(", "text", ",", "brackets", "=", "None", ")", ":", "res", "=", "[", "]", "for", "c", ",", "type_", "in", "_tokens", "(", "text", ",", "brackets", "=", "brackets", ")", ":", "if", "type_", "==", "TextType", ".", "text", ":...
Strip brackets and what is inside brackets from text. .. note:: If the text contains only one opening bracket, the rest of the text will be ignored. This is a feature, not a bug, as we want to avoid having this function raise errors too easily.
[ "Strip", "brackets", "and", "what", "is", "inside", "brackets", "from", "text", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/text.py#L56-L68
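A usage sketch, assuming '()' is part of the default bracket set recognized by _tokens.
from clldutils.text import strip_brackets
print(strip_brackets('hot (well, warm) water'))
# -> 'hot  water' (only leading/trailing whitespace is stripped)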
12,265
clld/clldutils
src/clldutils/text.py
split_text_with_context
def split_text_with_context(text, separators=WHITESPACE, brackets=None): """Splits text at separators outside of brackets. :param text: :param separators: An iterable of single character tokens. :param brackets: :return: A `list` of non-empty chunks. .. note:: This function leaves content in brackets in the chunks. """ res, chunk = [], [] for c, type_ in _tokens(text, brackets=brackets): if type_ == TextType.text and c in separators: res.append(''.join(chunk).strip()) chunk = [] else: chunk.append(c) res.append(''.join(chunk).strip()) return nfilter(res)
python
def split_text_with_context(text, separators=WHITESPACE, brackets=None): res, chunk = [], [] for c, type_ in _tokens(text, brackets=brackets): if type_ == TextType.text and c in separators: res.append(''.join(chunk).strip()) chunk = [] else: chunk.append(c) res.append(''.join(chunk).strip()) return nfilter(res)
[ "def", "split_text_with_context", "(", "text", ",", "separators", "=", "WHITESPACE", ",", "brackets", "=", "None", ")", ":", "res", ",", "chunk", "=", "[", "]", ",", "[", "]", "for", "c", ",", "type_", "in", "_tokens", "(", "text", ",", "brackets", "...
Splits text at separators outside of brackets. :param text: :param separators: An iterable of single character tokens. :param brackets: :return: A `list` of non-empty chunks. .. note:: This function leaves content in brackets in the chunks.
[ "Splits", "text", "at", "separators", "outside", "of", "brackets", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/text.py#L71-L89
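A usage sketch; per the note above, bracketed content stays in the chunks, again assuming '()' is in the default bracket set.
from clldutils.text import split_text_with_context
print(split_text_with_context('a b (c d) e'))
# roughly ['a', 'b', '(c d)', 'e'] - the space inside the brackets does not split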
12,266
clld/clldutils
src/clldutils/text.py
split_text
def split_text(text, separators=re.compile('\s'), brackets=None, strip=False): """Split text along the separators unless they appear within brackets. :param separators: An iterable of single characters or a compiled regex pattern. :param brackets: `dict` mapping start tokens to end tokens of what is to be \ recognized as brackets. .. note:: This function will also strip content within brackets. """ if not isinstance(separators, PATTERN_TYPE): separators = re.compile( '[{0}]'.format(''.join('\{0}'.format(c) for c in separators))) return nfilter( s.strip() if strip else s for s in separators.split(strip_brackets(text, brackets=brackets)))
python
def split_text(text, separators=re.compile('\s'), brackets=None, strip=False): if not isinstance(separators, PATTERN_TYPE): separators = re.compile( '[{0}]'.format(''.join('\{0}'.format(c) for c in separators))) return nfilter( s.strip() if strip else s for s in separators.split(strip_brackets(text, brackets=brackets)))
[ "def", "split_text", "(", "text", ",", "separators", "=", "re", ".", "compile", "(", "'\\s'", ")", ",", "brackets", "=", "None", ",", "strip", "=", "False", ")", ":", "if", "not", "isinstance", "(", "separators", ",", "PATTERN_TYPE", ")", ":", "separat...
Split text along the separators unless they appear within brackets. :param separators: An iterable of single characters or a compiled regex pattern. :param brackets: `dict` mapping start tokens to end tokens of what is to be \ recognized as brackets. .. note:: This function will also strip content within brackets.
[ "Split", "text", "along", "the", "separators", "unless", "they", "appear", "within", "brackets", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/text.py#L92-L107
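A usage sketch; bracketed content is stripped first, then the remainder is split on the given separators.
from clldutils.text import split_text
print(split_text('a, b; c (ignore me)', separators=',;', strip=True))
# -> ['a', 'b', 'c']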
12,267
clld/clldutils
src/clldutils/sfm.py
Entry.get
def get(self, key, default=None): """Retrieve the first value for a marker or None.""" for k, v in self: if k == key: return v return default
python
def get(self, key, default=None): for k, v in self: if k == key: return v return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "for", "k", ",", "v", "in", "self", ":", "if", "k", "==", "key", ":", "return", "v", "return", "default" ]
Retrieve the first value for a marker or None.
[ "Retrieve", "the", "first", "value", "for", "a", "marker", "or", "None", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L71-L76
12,268
clld/clldutils
src/clldutils/sfm.py
SFM.read
def read(self, filename, encoding='utf-8', marker_map=None, entry_impl=Entry, entry_sep='\n\n', entry_prefix=None, keep_empty=False): """Extend the list by parsing new entries from a file. :param filename: :param encoding: :param marker_map: A dict used to map marker names. :param entry_impl: Subclass of Entry or None :param entry_sep: :param entry_prefix: """ marker_map = marker_map or {} for entry in parse( filename, encoding, entry_sep, entry_prefix or entry_sep, keep_empty=keep_empty): if entry: self.append(entry_impl([(marker_map.get(k, k), v) for k, v in entry]))
python
def read(self, filename, encoding='utf-8', marker_map=None, entry_impl=Entry, entry_sep='\n\n', entry_prefix=None, keep_empty=False): marker_map = marker_map or {} for entry in parse( filename, encoding, entry_sep, entry_prefix or entry_sep, keep_empty=keep_empty): if entry: self.append(entry_impl([(marker_map.get(k, k), v) for k, v in entry]))
[ "def", "read", "(", "self", ",", "filename", ",", "encoding", "=", "'utf-8'", ",", "marker_map", "=", "None", ",", "entry_impl", "=", "Entry", ",", "entry_sep", "=", "'\\n\\n'", ",", "entry_prefix", "=", "None", ",", "keep_empty", "=", "False", ")", ":",...
Extend the list by parsing new entries from a file. :param filename: :param encoding: :param marker_map: A dict used to map marker names. :param entry_impl: Subclass of Entry or None :param entry_sep: :param entry_prefix:
[ "Extend", "the", "list", "by", "parsing", "new", "entries", "from", "a", "file", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L117-L142
12,269
clld/clldutils
src/clldutils/sfm.py
SFM.write
def write(self, filename, encoding='utf-8'): """Write the list of entries to a file. :param filename: :param encoding: :return: """ with io.open(str(filename), 'w', encoding=encoding) as fp: for entry in self: fp.write(entry.__unicode__()) fp.write('\n\n')
python
def write(self, filename, encoding='utf-8'): with io.open(str(filename), 'w', encoding=encoding) as fp: for entry in self: fp.write(entry.__unicode__()) fp.write('\n\n')
[ "def", "write", "(", "self", ",", "filename", ",", "encoding", "=", "'utf-8'", ")", ":", "with", "io", ".", "open", "(", "str", "(", "filename", ")", ",", "'w'", ",", "encoding", "=", "encoding", ")", "as", "fp", ":", "for", "entry", "in", "self", ...
Write the list of entries to a file. :param filename: :param encoding: :return:
[ "Write", "the", "list", "of", "entries", "to", "a", "file", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L148-L158
12,270
clld/clldutils
src/clldutils/misc.py
data_url
def data_url(content, mimetype=None): """ Returns content encoded as base64 Data URI. :param content: bytes or str or Path :param mimetype: mimetype for the content :return: str object (consisting only of ASCII, though) .. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme """ if isinstance(content, pathlib.Path): if not mimetype: mimetype = guess_type(content.name)[0] with content.open('rb') as fp: content = fp.read() else: if isinstance(content, text_type): content = content.encode('utf8') return "data:{0};base64,{1}".format( mimetype or 'application/octet-stream', b64encode(content).decode())
python
def data_url(content, mimetype=None): if isinstance(content, pathlib.Path): if not mimetype: mimetype = guess_type(content.name)[0] with content.open('rb') as fp: content = fp.read() else: if isinstance(content, text_type): content = content.encode('utf8') return "data:{0};base64,{1}".format( mimetype or 'application/octet-stream', b64encode(content).decode())
[ "def", "data_url", "(", "content", ",", "mimetype", "=", "None", ")", ":", "if", "isinstance", "(", "content", ",", "pathlib", ".", "Path", ")", ":", "if", "not", "mimetype", ":", "mimetype", "=", "guess_type", "(", "content", ".", "name", ")", "[", ...
Returns content encoded as base64 Data URI. :param content: bytes or str or Path :param mimetype: mimetype for the content :return: str object (consisting only of ASCII, though) .. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
[ "Returns", "content", "encoded", "as", "base64", "Data", "URI", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L24-L43
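A usage sketch; b64encode(b'hello') is 'aGVsbG8=', so the first call yields the URI shown.
from clldutils.misc import data_url
print(data_url(b'hello'))
# -> data:application/octet-stream;base64,aGVsbG8=
print(data_url('hi', mimetype='text/plain'))  # str input is encoded as UTF-8 first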
12,271
clld/clldutils
src/clldutils/misc.py
to_binary
def to_binary(s, encoding='utf8'): """Portable cast function. In python 2 the ``str`` function which is used to coerce objects to bytes does not accept an encoding argument, whereas python 3's ``bytes`` function requires one. :param s: object to be converted to binary_type :return: binary_type instance, representing s. """ if PY3: # pragma: no cover return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding) return binary_type(s)
python
def to_binary(s, encoding='utf8'): if PY3: # pragma: no cover return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding) return binary_type(s)
[ "def", "to_binary", "(", "s", ",", "encoding", "=", "'utf8'", ")", ":", "if", "PY3", ":", "# pragma: no cover", "return", "s", "if", "isinstance", "(", "s", ",", "binary_type", ")", "else", "binary_type", "(", "s", ",", "encoding", "=", "encoding", ")", ...
Portable cast function. In python 2 the ``str`` function which is used to coerce objects to bytes does not accept an encoding argument, whereas python 3's ``bytes`` function requires one. :param s: object to be converted to binary_type :return: binary_type instance, representing s.
[ "Portable", "cast", "function", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L61-L72
12,272
clld/clldutils
src/clldutils/misc.py
dict_merged
def dict_merged(d, _filter=None, **kw): """Update dictionary d with the items passed as kw if the value passes _filter.""" def f(s): if _filter: return _filter(s) return s is not None d = d or {} for k, v in iteritems(kw): if f(v): d[k] = v return d
python
def dict_merged(d, _filter=None, **kw): def f(s): if _filter: return _filter(s) return s is not None d = d or {} for k, v in iteritems(kw): if f(v): d[k] = v return d
[ "def", "dict_merged", "(", "d", ",", "_filter", "=", "None", ",", "*", "*", "kw", ")", ":", "def", "f", "(", "s", ")", ":", "if", "_filter", ":", "return", "_filter", "(", "s", ")", "return", "s", "is", "not", "None", "d", "=", "d", "or", "{"...
Update dictionary d with the items passed as kw if the value passes _filter.
[ "Update", "dictionary", "d", "with", "the", "items", "passed", "as", "kw", "if", "the", "value", "passes", "_filter", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L75-L85
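A usage sketch of the default filter (drop None) and a custom one.
from clldutils.misc import dict_merged
print(dict_merged({'a': 1}, b=2, c=None))
# -> {'a': 1, 'b': 2}   (c is dropped because it is None)
print(dict_merged(None, _filter=lambda v: v != '', d='', e='x'))
# -> {'e': 'x'}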
12,273
clld/clldutils
src/clldutils/misc.py
xmlchars
def xmlchars(text): """Not all of UTF-8 is considered valid character data in XML ... Thus, this function can be used to remove illegal characters from ``text``. """ invalid = list(range(0x9)) invalid.extend([0xb, 0xc]) invalid.extend(range(0xe, 0x20)) return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
python
def xmlchars(text): invalid = list(range(0x9)) invalid.extend([0xb, 0xc]) invalid.extend(range(0xe, 0x20)) return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
[ "def", "xmlchars", "(", "text", ")", ":", "invalid", "=", "list", "(", "range", "(", "0x9", ")", ")", "invalid", ".", "extend", "(", "[", "0xb", ",", "0xc", "]", ")", "invalid", ".", "extend", "(", "range", "(", "0xe", ",", "0x20", ")", ")", "r...
Not all of UTF-8 is considered valid character data in XML ... Thus, this function can be used to remove illegal characters from ``text``.
[ "Not", "all", "of", "UTF", "-", "8", "is", "considered", "valid", "character", "data", "in", "XML", "..." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L99-L107
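A usage sketch; tab, newline and carriage return survive, other control characters below 0x20 are removed.
from clldutils.misc import xmlchars
print(repr(xmlchars('ok\x00\x0b\ttext')))
# -> 'ok\ttext'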
12,274
clld/clldutils
src/clldutils/misc.py
slug
def slug(s, remove_whitespace=True, lowercase=True): """Condensed version of s, containing only lowercase alphanumeric characters.""" res = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') if lowercase: res = res.lower() for c in string.punctuation: res = res.replace(c, '') res = re.sub('\s+', '' if remove_whitespace else ' ', res) res = res.encode('ascii', 'ignore').decode('ascii') assert re.match('[ A-Za-z0-9]*$', res) return res
python
def slug(s, remove_whitespace=True, lowercase=True): res = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') if lowercase: res = res.lower() for c in string.punctuation: res = res.replace(c, '') res = re.sub('\s+', '' if remove_whitespace else ' ', res) res = res.encode('ascii', 'ignore').decode('ascii') assert re.match('[ A-Za-z0-9]*$', res) return res
[ "def", "slug", "(", "s", ",", "remove_whitespace", "=", "True", ",", "lowercase", "=", "True", ")", ":", "res", "=", "''", ".", "join", "(", "c", "for", "c", "in", "unicodedata", ".", "normalize", "(", "'NFD'", ",", "s", ")", "if", "unicodedata", "...
Condensed version of s, containing only lowercase alphanumeric characters.
[ "Condensed", "version", "of", "s", "containing", "only", "lowercase", "alphanumeric", "characters", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L139-L150
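A usage sketch; combining marks are dropped after NFD normalization, punctuation is removed, and whitespace handling depends on remove_whitespace.
from clldutils.misc import slug
assert slug('Ångström, Anders') == 'angstromanders'
assert slug('Ångström, Anders', remove_whitespace=False) == 'angstrom anders'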
12,275
clld/clldutils
src/clldutils/misc.py
encoded
def encoded(string, encoding='utf-8'): """Cast string to binary_type. :param string: six.binary_type or six.text_type :param encoding: encoding which the object is forced to :return: six.binary_type """ assert isinstance(string, string_types) or isinstance(string, binary_type) if isinstance(string, text_type): return string.encode(encoding) try: # make sure the string can be decoded in the specified encoding ... string.decode(encoding) return string except UnicodeDecodeError: # ... if not use latin1 as best guess to decode the string before encoding as # specified. return string.decode('latin1').encode(encoding)
python
def encoded(string, encoding='utf-8'): assert isinstance(string, string_types) or isinstance(string, binary_type) if isinstance(string, text_type): return string.encode(encoding) try: # make sure the string can be decoded in the specified encoding ... string.decode(encoding) return string except UnicodeDecodeError: # ... if not use latin1 as best guess to decode the string before encoding as # specified. return string.decode('latin1').encode(encoding)
[ "def", "encoded", "(", "string", ",", "encoding", "=", "'utf-8'", ")", ":", "assert", "isinstance", "(", "string", ",", "string_types", ")", "or", "isinstance", "(", "string", ",", "binary_type", ")", "if", "isinstance", "(", "string", ",", "text_type", ")...
Cast string to binary_type. :param string: six.binary_type or six.text_type :param encoding: encoding which the object is forced to :return: six.binary_type
[ "Cast", "string", "to", "binary_type", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/misc.py#L153-L170
12,276
clld/clldutils
src/clldutils/path.py
readlines
def readlines(p, encoding=None, strip=False, comment=None, normalize=None, linenumbers=False): """ Read a `list` of lines from a text file. :param p: File path (or `list` or `tuple` of text) :param encoding: Registered codec. :param strip: If `True`, strip leading and trailing whitespace. :param comment: String used as syntax to mark comment lines. When not `None`, \ commented lines will be stripped. This implies `strip=True`. :param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD' :param linenumbers: return also line numbers. :return: `list` of text lines or pairs (`int`, text or `None`). """ if comment: strip = True if isinstance(p, (list, tuple)): res = [l.decode(encoding) if encoding else l for l in p] else: with Path(p).open(encoding=encoding or 'utf-8') as fp: res = fp.readlines() if strip: res = [l.strip() or None for l in res] if comment: res = [None if l and l.startswith(comment) else l for l in res] if normalize: res = [unicodedata.normalize(normalize, l) if l else l for l in res] if linenumbers: return [(n, l) for n, l in enumerate(res, 1)] return [l for l in res if l is not None]
python
def readlines(p, encoding=None, strip=False, comment=None, normalize=None, linenumbers=False): if comment: strip = True if isinstance(p, (list, tuple)): res = [l.decode(encoding) if encoding else l for l in p] else: with Path(p).open(encoding=encoding or 'utf-8') as fp: res = fp.readlines() if strip: res = [l.strip() or None for l in res] if comment: res = [None if l and l.startswith(comment) else l for l in res] if normalize: res = [unicodedata.normalize(normalize, l) if l else l for l in res] if linenumbers: return [(n, l) for n, l in enumerate(res, 1)] return [l for l in res if l is not None]
[ "def", "readlines", "(", "p", ",", "encoding", "=", "None", ",", "strip", "=", "False", ",", "comment", "=", "None", ",", "normalize", "=", "None", ",", "linenumbers", "=", "False", ")", ":", "if", "comment", ":", "strip", "=", "True", "if", "isinsta...
Read a `list` of lines from a text file. :param p: File path (or `list` or `tuple` of text) :param encoding: Registered codec. :param strip: If `True`, strip leading and trailing whitespace. :param comment: String used as syntax to mark comment lines. When not `None`, \ commented lines will be stripped. This implies `strip=True`. :param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD' :param linenumbers: return also line numbers. :return: `list` of text lines or pairs (`int`, text or `None`).
[ "Read", "a", "list", "of", "lines", "from", "a", "text", "file", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/path.py#L96-L129
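A usage sketch exercising the list input path documented above.
from clldutils.path import readlines
print(readlines(['  a  ', '# note', '', 'b'], strip=True, comment='#'))
# -> ['a', 'b']   (comment='#' implies strip=True anyway)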
12,277
clld/clldutils
src/clldutils/path.py
walk
def walk(p, mode='all', **kw): """Wrapper for `os.walk`, yielding `Path` objects. :param p: root of the directory tree to walk. :param mode: 'all|dirs|files', defaulting to 'all'. :param kw: Keyword arguments are passed to `os.walk`. :return: Generator for the requested Path objects. """ for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw): if mode in ('all', 'dirs'): for dirname in dirnames: yield Path(dirpath).joinpath(dirname) if mode in ('all', 'files'): for fname in filenames: yield Path(dirpath).joinpath(fname)
python
def walk(p, mode='all', **kw): for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw): if mode in ('all', 'dirs'): for dirname in dirnames: yield Path(dirpath).joinpath(dirname) if mode in ('all', 'files'): for fname in filenames: yield Path(dirpath).joinpath(fname)
[ "def", "walk", "(", "p", ",", "mode", "=", "'all'", ",", "*", "*", "kw", ")", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "as_posix", "(", "p", ")", ",", "*", "*", "kw", ")", ":", "if", "mode", "in"...
Wrapper for `os.walk`, yielding `Path` objects. :param p: root of the directory tree to walk. :param mode: 'all|dirs|files', defaulting to 'all'. :param kw: Keyword arguments are passed to `os.walk`. :return: Generator for the requested Path objects.
[ "Wrapper", "for", "os", ".", "walk", "yielding", "Path", "objects", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/path.py#L148-L162
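A usage sketch; extra keyword arguments go straight to os.walk.
from clldutils.path import walk
for p in walk('.', mode='files', topdown=True):
    print(p)  # Path objects for every file under '.'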
12,278
clld/clldutils
src/clldutils/source.py
Source.bibtex
def bibtex(self): """Represent the source in BibTeX format. :return: string encoding the source in BibTeX syntax. """ m = max(itertools.chain(map(len, self), [0])) fields = (" %s = {%s}" % (k.ljust(m), self[k]) for k in self) return "@%s{%s,\n%s\n}" % ( getattr(self.genre, 'value', self.genre), self.id, ",\n".join(fields))
python
def bibtex(self): m = max(itertools.chain(map(len, self), [0])) fields = (" %s = {%s}" % (k.ljust(m), self[k]) for k in self) return "@%s{%s,\n%s\n}" % ( getattr(self.genre, 'value', self.genre), self.id, ",\n".join(fields))
[ "def", "bibtex", "(", "self", ")", ":", "m", "=", "max", "(", "itertools", ".", "chain", "(", "map", "(", "len", ",", "self", ")", ",", "[", "0", "]", ")", ")", "fields", "=", "(", "\" %s = {%s}\"", "%", "(", "k", ".", "ljust", "(", "m", ")"...
Represent the source in BibTeX format. :return: string encoding the source in BibTeX syntax.
[ "Represent", "the", "source", "in", "BibTeX", "format", "." ]
7b8587ef5b56a2fc6cafaff90bc5004355c2b13f
https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/source.py#L109-L117
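A hedged sketch; the Source constructor signature (genre, id, then fields as keywords) is assumed from the class's usual usage and is not shown in this record.
from clldutils.source import Source
src = Source('book', 'meier2005', author='Meier, Hans', year='2005')
print(src.bibtex())
# roughly:
# @book{meier2005,
#   author = {Meier, Hans},
#   year   = {2005}
# }   (field padding comes from the ljust(m) call above)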
12,279
messagebird/python-rest-api
messagebird/client.py
Client.request
def request(self, path, method='GET', params=None, type=REST_TYPE): """Builds a request, gets a response and decodes it.""" response_text = self._get_http_client(type).request(path, method, params) if not response_text: return response_text response_json = json.loads(response_text) if 'errors' in response_json: raise (ErrorException([Error().load(e) for e in response_json['errors']])) return response_json
python
def request(self, path, method='GET', params=None, type=REST_TYPE): response_text = self._get_http_client(type).request(path, method, params) if not response_text: return response_text response_json = json.loads(response_text) if 'errors' in response_json: raise (ErrorException([Error().load(e) for e in response_json['errors']])) return response_json
[ "def", "request", "(", "self", ",", "path", ",", "method", "=", "'GET'", ",", "params", "=", "None", ",", "type", "=", "REST_TYPE", ")", ":", "response_text", "=", "self", ".", "_get_http_client", "(", "type", ")", ".", "request", "(", "path", ",", "...
Builds a request, gets a response and decodes it.
[ "Builds", "a", "request", "gets", "a", "response", "and", "decodes", "it", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L52-L64
12,280
messagebird/python-rest-api
messagebird/client.py
Client.message_create
def message_create(self, originator, recipients, body, params=None): """Create a new message.""" if params is None: params = {} if type(recipients) == list: recipients = ','.join(recipients) params.update({'originator': originator, 'body': body, 'recipients': recipients}) return Message().load(self.request('messages', 'POST', params))
python
def message_create(self, originator, recipients, body, params=None): if params is None: params = {} if type(recipients) == list: recipients = ','.join(recipients) params.update({'originator': originator, 'body': body, 'recipients': recipients}) return Message().load(self.request('messages', 'POST', params))
[ "def", "message_create", "(", "self", ",", "originator", ",", "recipients", ",", "body", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "type", "(", "recipients", ")", "==", "list", ":", "recip...
Create a new message.
[ "Create", "a", "new", "message", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L100-L107
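A usage sketch; the messagebird package exposes Client at the top level, and the access key below is a placeholder.
import messagebird
client = messagebird.Client('test_gshuPaZoeEG6ovbc8M79w0QyM')  # placeholder key
msg = client.message_create('MessageBird', ['31612345678'],
                            'Hello world', {'reference': 'foo'})
print(msg.id)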
12,281
messagebird/python-rest-api
messagebird/client.py
Client.voice_message_create
def voice_message_create(self, recipients, body, params=None): """Create a new voice message.""" if params is None: params = {} if type(recipients) == list: recipients = ','.join(recipients) params.update({'recipients': recipients, 'body': body}) return VoiceMessage().load(self.request('voicemessages', 'POST', params))
python
def voice_message_create(self, recipients, body, params=None): if params is None: params = {} if type(recipients) == list: recipients = ','.join(recipients) params.update({'recipients': recipients, 'body': body}) return VoiceMessage().load(self.request('voicemessages', 'POST', params))
[ "def", "voice_message_create", "(", "self", ",", "recipients", ",", "body", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "type", "(", "recipients", ")", "==", "list", ":", "recipients", "=", ...
Create a new voice message.
[ "Create", "a", "new", "voice", "message", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L117-L124
12,282
messagebird/python-rest-api
messagebird/client.py
Client.lookup
def lookup(self, phonenumber, params=None): """Do a new lookup.""" if params is None: params = {} return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))
python
def lookup(self, phonenumber, params=None): if params is None: params = {} return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))
[ "def", "lookup", "(", "self", ",", "phonenumber", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "return", "Lookup", "(", ")", ".", "load", "(", "self", ".", "request", "(", "'lookup/'", "+", "str"...
Do a new lookup.
[ "Do", "a", "new", "lookup", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L126-L129
12,283
messagebird/python-rest-api
messagebird/client.py
Client.lookup_hlr
def lookup_hlr(self, phonenumber, params=None): """Retrieve the information of a specific HLR lookup.""" if params is None: params = {} return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'GET', params))
python
def lookup_hlr(self, phonenumber, params=None): if params is None: params = {} return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'GET', params))
[ "def", "lookup_hlr", "(", "self", ",", "phonenumber", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "return", "HLR", "(", ")", ".", "load", "(", "self", ".", "request", "(", "'lookup/'", "+", "str...
Retrieve the information of a specific HLR lookup.
[ "Retrieve", "the", "information", "of", "a", "specific", "HLR", "lookup", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L131-L134
12,284
messagebird/python-rest-api
messagebird/client.py
Client.verify_create
def verify_create(self, recipient, params=None): """Create a new verification.""" if params is None: params = {} params.update({'recipient': recipient}) return Verify().load(self.request('verify', 'POST', params))
python
def verify_create(self, recipient, params=None): if params is None: params = {} params.update({'recipient': recipient}) return Verify().load(self.request('verify', 'POST', params))
[ "def", "verify_create", "(", "self", ",", "recipient", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "params", ".", "update", "(", "{", "'recipient'", ":", "recipient", "}", ")", "return", "Verify", ...
Create a new verification.
[ "Create", "a", "new", "verification", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L145-L149
12,285
messagebird/python-rest-api
messagebird/client.py
Client.verify_verify
def verify_verify(self, id, token): """Verify the token of a specific verification.""" return Verify().load(self.request('verify/' + str(id), params={'token': token}))
python
def verify_verify(self, id, token): return Verify().load(self.request('verify/' + str(id), params={'token': token}))
[ "def", "verify_verify", "(", "self", ",", "id", ",", "token", ")", ":", "return", "Verify", "(", ")", ".", "load", "(", "self", ".", "request", "(", "'verify/'", "+", "str", "(", "id", ")", ",", "params", "=", "{", "'token'", ":", "token", "}", "...
Verify the token of a specific verification.
[ "Verify", "the", "token", "of", "a", "specific", "verification", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/client.py#L151-L153
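A sketch tying verify_create and verify_verify together; the key, number and token are placeholders.
import messagebird
client = messagebird.Client('test_access_key')  # placeholder key
verify = client.verify_create('31612345678', {'originator': 'MyApp'})
# ... later, check the token the user typed in:
result = client.verify_verify(verify.id, '123456')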
12,286
messagebird/python-rest-api
messagebird/base_list.py
BaseList.items
def items(self, value): """Create typed objects from the dicts.""" items = [] for item in value: items.append(self.itemType().load(item)) self._items = items
python
def items(self, value): items = [] for item in value: items.append(self.itemType().load(item)) self._items = items
[ "def", "items", "(", "self", ",", "value", ")", ":", "items", "=", "[", "]", "for", "item", "in", "value", ":", "items", ".", "append", "(", "self", ".", "itemType", "(", ")", ".", "load", "(", "item", ")", ")", "self", ".", "_items", "=", "ite...
Create typed objects from the dicts.
[ "Create", "typed", "objects", "from", "the", "dicts", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/base_list.py#L39-L45
12,287
messagebird/python-rest-api
messagebird/http_client.py
HttpClient.request
def request(self, path, method='GET', params=None): """Builds a request and gets a response.""" if params is None: params = {} url = urljoin(self.endpoint, path) headers = { 'Accept': 'application/json', 'Authorization': 'AccessKey ' + self.access_key, 'User-Agent': self.user_agent, 'Content-Type': 'application/json' } if method == 'DELETE': response = requests.delete(url, verify=True, headers=headers, data=json.dumps(params)) elif method == 'GET': response = requests.get(url, verify=True, headers=headers, params=params) elif method == 'PATCH': response = requests.patch(url, verify=True, headers=headers, data=json.dumps(params)) elif method == 'POST': response = requests.post(url, verify=True, headers=headers, data=json.dumps(params)) elif method == 'PUT': response = requests.put(url, verify=True, headers=headers, data=json.dumps(params)) else: raise ValueError(str(method) + ' is not a supported HTTP method') if response.status_code in self.__supported_status_codes: response_text = response.text else: response.raise_for_status() return response_text
python
def request(self, path, method='GET', params=None): if params is None: params = {} url = urljoin(self.endpoint, path) headers = { 'Accept': 'application/json', 'Authorization': 'AccessKey ' + self.access_key, 'User-Agent': self.user_agent, 'Content-Type': 'application/json' } if method == 'DELETE': response = requests.delete(url, verify=True, headers=headers, data=json.dumps(params)) elif method == 'GET': response = requests.get(url, verify=True, headers=headers, params=params) elif method == 'PATCH': response = requests.patch(url, verify=True, headers=headers, data=json.dumps(params)) elif method == 'POST': response = requests.post(url, verify=True, headers=headers, data=json.dumps(params)) elif method == 'PUT': response = requests.put(url, verify=True, headers=headers, data=json.dumps(params)) else: raise ValueError(str(method) + ' is not a supported HTTP method') if response.status_code in self.__supported_status_codes: response_text = response.text else: response.raise_for_status() return response_text
[ "def", "request", "(", "self", ",", "path", ",", "method", "=", "'GET'", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "url", "=", "urljoin", "(", "self", ".", "endpoint", ",", "path", ")", "hea...
Builds a request and gets a response.
[ "Builds", "a", "request", "and", "gets", "a", "response", "." ]
fb7864f178135f92d09af803bee93270e99f3963
https://github.com/messagebird/python-rest-api/blob/fb7864f178135f92d09af803bee93270e99f3963/messagebird/http_client.py#L20-L50
12,288
sagemath/cysignals
src/scripts/cysignals-CSI-helper.py
cython_debug_files
def cython_debug_files(): """ Cython extra debug information files """ # Search all subdirectories of sys.path directories for a # "cython_debug" directory. Note that sys_path is a variable set by # cysignals-CSI. It may differ from sys.path if GDB is run with a # different Python interpreter. files = [] for path in sys_path: # noqa pattern = os.path.join(path, '*', 'cython_debug', 'cython_debug_info_*') files.extend(glob.glob(pattern)) return files
python
def cython_debug_files(): # Search all subdirectories of sys.path directories for a # "cython_debug" directory. Note that sys_path is a variable set by # cysignals-CSI. It may differ from sys.path if GDB is run with a # different Python interpreter. files = [] for path in sys_path: # noqa pattern = os.path.join(path, '*', 'cython_debug', 'cython_debug_info_*') files.extend(glob.glob(pattern)) return files
[ "def", "cython_debug_files", "(", ")", ":", "# Search all subdirectories of sys.path directories for a", "# \"cython_debug\" directory. Note that sys_path is a variable set by", "# cysignals-CSI. It may differ from sys.path if GDB is run with a", "# different Python interpreter.", "files", "=", ...
Cython extra debug information files
[ "Cython", "extra", "debug", "information", "files" ]
a8a8ad789332996e7e094188d65884982e899a65
https://github.com/sagemath/cysignals/blob/a8a8ad789332996e7e094188d65884982e899a65/src/scripts/cysignals-CSI-helper.py#L34-L46
12,289
BerkeleyAutomation/perception
perception/colorized_phoxi_sensor.py
ColorizedPhoXiSensor._colorize
def _colorize(self, depth_im, color_im): """Colorize a depth image from the PhoXi using a color image from the webcam. Parameters ---------- depth_im : DepthImage The PhoXi depth image. color_im : ColorImage Corresponding color image. Returns ------- ColorImage A colorized image corresponding to the PhoXi depth image. """ # Project the point cloud into the webcam's frame target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3) pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im) pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth) # Sort the points by their distance from the webcam's aperture pc_data = pc_color.data.T dists = np.linalg.norm(pc_data, axis=1) order = np.argsort(dists) pc_data = pc_data[order] pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame) sorted_dists = dists[order] sorted_depths = depth_im.data.flatten()[order] # Generate image coordinates for each sorted point icds = self._webcam.color_intrinsics.project(pc_color).data.T # Create mask for points that are masked by others rounded_icds = np.array(icds / 3.0, dtype=np.uint32) unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True) icd_depths = sorted_dists[unique_inds] min_depths_pp = icd_depths[unique_inv] depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3 # Create mask for points with missing depth or that lie outside the image valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width), np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height)) valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0) valid_mask = np.logical_and(valid_mask, depth_delta_mask) valid_icds = icds[valid_mask] colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:] color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8) color_im_data[valid_mask] = colors color_im_data[order] = color_im_data.copy() color_im_data = color_im_data.reshape(target_shape) return ColorImage(color_im_data, frame=self._frame)
python
def _colorize(self, depth_im, color_im):
    # Project the point cloud into the webcam's frame
    target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
    pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
    pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)

    # Sort the points by their distance from the webcam's aperture
    pc_data = pc_color.data.T
    dists = np.linalg.norm(pc_data, axis=1)
    order = np.argsort(dists)
    pc_data = pc_data[order]
    pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
    sorted_dists = dists[order]
    sorted_depths = depth_im.data.flatten()[order]

    # Generate image coordinates for each sorted point
    icds = self._webcam.color_intrinsics.project(pc_color).data.T

    # Create mask for points that are masked by others
    rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
    unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0,
                                                     return_index=True,
                                                     return_inverse=True)
    icd_depths = sorted_dists[unique_inds]
    min_depths_pp = icd_depths[unique_inv]
    depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3

    # Create mask for points with missing depth or that lie outside the image
    valid_mask = np.logical_and(np.logical_and(icds[:, 0] >= 0,
                                               icds[:, 0] < self._webcam.color_intrinsics.width),
                                np.logical_and(icds[:, 1] >= 0,
                                               icds[:, 1] < self._webcam.color_intrinsics.height))
    valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
    valid_mask = np.logical_and(valid_mask, depth_delta_mask)
    valid_icds = icds[valid_mask]

    colors = color_im.data[valid_icds[:, 1], valid_icds[:, 0], :]
    color_im_data = np.zeros((target_shape[0] * target_shape[1],
                              target_shape[2]), dtype=np.uint8)
    color_im_data[valid_mask] = colors
    color_im_data[order] = color_im_data.copy()
    color_im_data = color_im_data.reshape(target_shape)
    return ColorImage(color_im_data, frame=self._frame)
[ "def", "_colorize", "(", "self", ",", "depth_im", ",", "color_im", ")", ":", "# Project the point cloud into the webcam's frame", "target_shape", "=", "(", "depth_im", ".", "data", ".", "shape", "[", "0", "]", ",", "depth_im", ".", "data", ".", "shape", "[", ...
Colorize a depth image from the PhoXi using a color image from the webcam.

        Parameters
        ----------
        depth_im : DepthImage
            The PhoXi depth image.
        color_im : ColorImage
            Corresponding color image.

        Returns
        -------
        ColorImage
            A colorized image corresponding to the PhoXi depth image.
[ "Colorize", "a", "depth", "image", "from", "the", "PhoXi", "using", "a", "color", "image", "from", "the", "webcam", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/colorized_phoxi_sensor.py#L145-L196
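The occlusion test in _colorize rests on one numpy trick: after sorting points near-to-far, np.unique's return_index picks out the nearest point in each coarsened pixel cell. A toy sketch with synthetic coordinates (all values illustrative):

import numpy as np

# Three projected points; two fall in the same 3x3-pixel cell.
icds = np.array([[9, 12], [10, 12], [40, 7]])   # pixel coordinates
dists = np.array([0.50, 0.62, 0.55])            # camera distances

order = np.argsort(dists)                       # sort near-to-far
icds, dists = icds[order], dists[order]

cells = (icds / 3.0).astype(np.uint32)          # same coarsening as above
_, first, inv = np.unique(cells, axis=0, return_index=True,
                          return_inverse=True)
nearest = dists[first][inv]                     # nearest distance per cell
visible = np.abs(nearest - dists) < 5e-3        # keep front-most points only
print(visible)                                  # [ True  True False]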
12,290
BerkeleyAutomation/perception
perception/rgbd_sensors.py
RgbdSensorFactory.sensor
def sensor(sensor_type, cfg):
    """ Creates a camera sensor of the specified type.

    Parameters
    ----------
    sensor_type : :obj:`str`
        the type of the sensor (real or virtual)
    cfg : :obj:`YamlConfig`
        dictionary of parameters for sensor initialization
    """
    sensor_type = sensor_type.lower()
    if sensor_type == 'kinect2':
        s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
                          device_num=cfg['device_num'],
                          frame=cfg['frame'])
    elif sensor_type == 'bridged_kinect2':
        s = KinectSensorBridged(quality=cfg['quality'],
                                frame=cfg['frame'])
    elif sensor_type == 'primesense':
        flip_images = True
        if 'flip_images' in cfg.keys():
            flip_images = cfg['flip_images']
        s = PrimesenseSensor(auto_white_balance=cfg['auto_white_balance'],
                             flip_images=flip_images,
                             frame=cfg['frame'])
    elif sensor_type == 'virtual':
        s = VirtualSensor(cfg['image_dir'], frame=cfg['frame'])
    elif sensor_type == 'tensor_dataset':
        s = TensorDatasetVirtualSensor(cfg['dataset_dir'], frame=cfg['frame'])
    elif sensor_type == 'primesense_ros':
        s = PrimesenseSensor_ROS(frame=cfg['frame'])
    elif sensor_type == 'ensenso':
        s = EnsensoSensor(frame=cfg['frame'])
    elif sensor_type == 'phoxi':
        s = PhoXiSensor(frame=cfg['frame'],
                        device_name=cfg['device_name'],
                        size=cfg['size'])
    elif sensor_type == 'webcam':
        s = WebcamSensor(frame=cfg['frame'],
                         device_id=cfg['device_id'])
    elif sensor_type == 'colorized_phoxi':
        s = ColorizedPhoXiSensor(frame=cfg['frame'],
                                 phoxi_config=cfg['phoxi_config'],
                                 webcam_config=cfg['webcam_config'],
                                 calib_dir=cfg['calib_dir'])
    elif sensor_type == 'realsense':
        s = RealSenseSensor(
            cam_id=cfg['cam_id'],
            filter_depth=cfg['filter_depth'],
            frame=cfg['frame'],
        )
    else:
        raise ValueError('RGBD sensor type %s not supported' % (sensor_type))
    return s
python
def sensor(sensor_type, cfg):
    sensor_type = sensor_type.lower()
    if sensor_type == 'kinect2':
        s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
                          device_num=cfg['device_num'],
                          frame=cfg['frame'])
    elif sensor_type == 'bridged_kinect2':
        s = KinectSensorBridged(quality=cfg['quality'],
                                frame=cfg['frame'])
    elif sensor_type == 'primesense':
        flip_images = True
        if 'flip_images' in cfg.keys():
            flip_images = cfg['flip_images']
        s = PrimesenseSensor(auto_white_balance=cfg['auto_white_balance'],
                             flip_images=flip_images,
                             frame=cfg['frame'])
    elif sensor_type == 'virtual':
        s = VirtualSensor(cfg['image_dir'], frame=cfg['frame'])
    elif sensor_type == 'tensor_dataset':
        s = TensorDatasetVirtualSensor(cfg['dataset_dir'], frame=cfg['frame'])
    elif sensor_type == 'primesense_ros':
        s = PrimesenseSensor_ROS(frame=cfg['frame'])
    elif sensor_type == 'ensenso':
        s = EnsensoSensor(frame=cfg['frame'])
    elif sensor_type == 'phoxi':
        s = PhoXiSensor(frame=cfg['frame'],
                        device_name=cfg['device_name'],
                        size=cfg['size'])
    elif sensor_type == 'webcam':
        s = WebcamSensor(frame=cfg['frame'],
                         device_id=cfg['device_id'])
    elif sensor_type == 'colorized_phoxi':
        s = ColorizedPhoXiSensor(frame=cfg['frame'],
                                 phoxi_config=cfg['phoxi_config'],
                                 webcam_config=cfg['webcam_config'],
                                 calib_dir=cfg['calib_dir'])
    elif sensor_type == 'realsense':
        s = RealSenseSensor(
            cam_id=cfg['cam_id'],
            filter_depth=cfg['filter_depth'],
            frame=cfg['frame'],
        )
    else:
        raise ValueError('RGBD sensor type %s not supported' % (sensor_type))
    return s
[ "def", "sensor", "(", "sensor_type", ",", "cfg", ")", ":", "sensor_type", "=", "sensor_type", ".", "lower", "(", ")", "if", "sensor_type", "==", "'kinect2'", ":", "s", "=", "Kinect2Sensor", "(", "packet_pipeline_mode", "=", "cfg", "[", "'pipeline_mode'", "]"...
Creates a camera sensor of the specified type.

        Parameters
        ----------
        sensor_type : :obj:`str`
            the type of the sensor (real or virtual)
        cfg : :obj:`YamlConfig`
            dictionary of parameters for sensor initialization
[ "Creates", "a", "camera", "sensor", "of", "the", "specified", "type", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/rgbd_sensors.py#L11-L63
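A minimal usage sketch for the factory. The keys mirror the 'realsense' branch above, the import path follows this file's location, and the cam_id value is a hypothetical device serial:

from perception.rgbd_sensors import RgbdSensorFactory

cfg = {'cam_id': '123456789012',   # hypothetical serial number
       'filter_depth': True,
       'frame': 'realsense'}
sensor = RgbdSensorFactory.sensor('realsense', cfg)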
12,291
BerkeleyAutomation/perception
perception/feature_matcher.py
FeatureMatcher.get_point_index
def get_point_index(point, all_points, eps=1e-4):
    """ Get the index of a point in an array """
    inds = np.where(np.linalg.norm(point - all_points, axis=1) < eps)
    if inds[0].shape[0] == 0:
        return -1
    return inds[0][0]
python
def get_point_index(point, all_points, eps=1e-4):
    inds = np.where(np.linalg.norm(point - all_points, axis=1) < eps)
    if inds[0].shape[0] == 0:
        return -1
    return inds[0][0]
[ "def", "get_point_index", "(", "point", ",", "all_points", ",", "eps", "=", "1e-4", ")", ":", "inds", "=", "np", ".", "where", "(", "np", ".", "linalg", ".", "norm", "(", "point", "-", "all_points", ",", "axis", "=", "1", ")", "<", "eps", ")", "i...
Get the index of a point in an array
[ "Get", "the", "index", "of", "a", "point", "in", "an", "array" ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/feature_matcher.py#L111-L116
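The helper reduces to one vectorized norm test. Re-running the same computation on toy data (values illustrative):

import numpy as np

all_points = np.array([[0.0, 0.0, 0.0],
                       [1.0, 2.0, 3.0],
                       [4.0, 5.0, 6.0]])
point = np.array([1.0, 2.0, 3.0])

# Same test as get_point_index: rows of all_points within eps of point.
inds = np.where(np.linalg.norm(point - all_points, axis=1) < 1e-4)
index = inds[0][0] if inds[0].shape[0] else -1
print(index)                       # 1; would be -1 if no row matched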
12,292
BerkeleyAutomation/perception
perception/feature_matcher.py
RawDistanceFeatureMatcher.match
def match(self, source_obj_features, target_obj_features):
    """ Matches features between two graspable objects based on a full distance matrix.

    Parameters
    ----------
    source_obj_features : :obj:`BagOfFeatures`
        bag of the source object's features
    target_obj_features : :obj:`BagOfFeatures`
        bag of the target object's features

    Returns
    -------
    corrs : :obj:`Correspondences`
        the correspondences between source and target
    """
    if not isinstance(source_obj_features, f.BagOfFeatures):
        raise ValueError('Must supply source bag of object features')
    if not isinstance(target_obj_features, f.BagOfFeatures):
        raise ValueError('Must supply target bag of object features')

    # source feature descriptors and keypoints
    source_descriptors = source_obj_features.descriptors
    target_descriptors = target_obj_features.descriptors
    source_keypoints = source_obj_features.keypoints
    target_keypoints = target_obj_features.keypoints

    # calculate distance between this model's descriptors and each of the other_model's descriptors
    dists = spatial.distance.cdist(source_descriptors, target_descriptors)

    # calculate the indices of the target_model that minimize the distance to the descriptors in this model
    source_closest_descriptors = dists.argmin(axis=1)
    target_closest_descriptors = dists.argmin(axis=0)

    match_indices = []
    source_matched_points = np.zeros((0, 3))
    target_matched_points = np.zeros((0, 3))

    # calculate which points/indices the closest descriptors correspond to
    for i, j in enumerate(source_closest_descriptors):
        # for now, only keep correspondences that are a 2-way match
        if target_closest_descriptors[j] == i:
            match_indices.append(j)
            source_matched_points = np.r_[source_matched_points, source_keypoints[i:i+1, :]]
            target_matched_points = np.r_[target_matched_points, target_keypoints[j:j+1, :]]
        else:
            match_indices.append(-1)
    return Correspondences(match_indices, source_matched_points, target_matched_points)
python
def match(self, source_obj_features, target_obj_features):
    if not isinstance(source_obj_features, f.BagOfFeatures):
        raise ValueError('Must supply source bag of object features')
    if not isinstance(target_obj_features, f.BagOfFeatures):
        raise ValueError('Must supply target bag of object features')

    # source feature descriptors and keypoints
    source_descriptors = source_obj_features.descriptors
    target_descriptors = target_obj_features.descriptors
    source_keypoints = source_obj_features.keypoints
    target_keypoints = target_obj_features.keypoints

    # calculate distance between this model's descriptors and each of the other_model's descriptors
    dists = spatial.distance.cdist(source_descriptors, target_descriptors)

    # calculate the indices of the target_model that minimize the distance to the descriptors in this model
    source_closest_descriptors = dists.argmin(axis=1)
    target_closest_descriptors = dists.argmin(axis=0)

    match_indices = []
    source_matched_points = np.zeros((0, 3))
    target_matched_points = np.zeros((0, 3))

    # calculate which points/indices the closest descriptors correspond to
    for i, j in enumerate(source_closest_descriptors):
        # for now, only keep correspondences that are a 2-way match
        if target_closest_descriptors[j] == i:
            match_indices.append(j)
            source_matched_points = np.r_[source_matched_points, source_keypoints[i:i+1, :]]
            target_matched_points = np.r_[target_matched_points, target_keypoints[j:j+1, :]]
        else:
            match_indices.append(-1)
    return Correspondences(match_indices, source_matched_points, target_matched_points)
[ "def", "match", "(", "self", ",", "source_obj_features", ",", "target_obj_features", ")", ":", "if", "not", "isinstance", "(", "source_obj_features", ",", "f", ".", "BagOfFeatures", ")", ":", "raise", "ValueError", "(", "'Must supply source bag of object features'", ...
Matches features between two graspable objects based on a full distance matrix.

        Parameters
        ----------
        source_obj_features : :obj:`BagOfFeatures`
            bag of the source object's features
        target_obj_features : :obj:`BagOfFeatures`
            bag of the target object's features

        Returns
        -------
        corrs : :obj:`Correspondences`
            the correspondences between source and target
[ "Matches", "features", "between", "two", "graspable", "objects", "based", "on", "a", "full", "distance", "matrix", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/feature_matcher.py#L126-L173
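The heart of the matcher is a mutual nearest-neighbor test on the descriptor distance matrix. A toy sketch with 4-D descriptors (values illustrative):

import numpy as np
from scipy import spatial

source = np.array([[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])
target = np.array([[1.1, 1.0, 1.0, 1.0], [0.1, 0.0, 0.0, 0.0]])

dists = spatial.distance.cdist(source, target)
fwd = dists.argmin(axis=1)         # nearest target per source descriptor
bwd = dists.argmin(axis=0)         # nearest source per target descriptor
mutual = [j if bwd[j] == i else -1 for i, j in enumerate(fwd)]
print(mutual)                      # [1, 0]: both pairs are 2-way matches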
12,293
BerkeleyAutomation/perception
perception/feature_matcher.py
PointToPlaneFeatureMatcher.match
def match(self, source_points, target_points, source_normals, target_normals):
    """ Matches points between two point-normal sets. Uses the closest ip to choose matches,
    with distance for thresholding only.

    Parameters
    ----------
    source_points : Nx3 :obj:`numpy.ndarray`
        source object points
    target_points : Nx3 :obj:`numpy.ndarray`
        target object points
    source_normals : Nx3 :obj:`numpy.ndarray`
        source object outward-pointing normals
    target_normals : Nx3 :obj:`numpy.ndarray`
        target object outward-pointing normals

    Returns
    -------
    :obj:`Correspondences`
        the correspondences between source and target
    """
    # compute the distances and inner products between the point sets
    dists = ssd.cdist(source_points, target_points, 'euclidean')
    ip = source_normals.dot(target_normals.T)  # abs because we don't have correct orientations
    source_ip = source_points.dot(target_normals.T)
    target_ip = target_points.dot(target_normals.T)
    target_ip = np.diag(target_ip)
    target_ip = np.tile(target_ip, [source_points.shape[0], 1])
    abs_diff = np.abs(source_ip - target_ip)  # difference in inner products

    # mark invalid correspondences
    invalid_dists = np.where(dists > self.dist_thresh_)
    abs_diff[invalid_dists[0], invalid_dists[1]] = np.inf
    invalid_norms = np.where(ip < self.norm_thresh_)
    abs_diff[invalid_norms[0], invalid_norms[1]] = np.inf

    # choose the closest matches
    match_indices = np.argmin(abs_diff, axis=1)
    match_vals = np.min(abs_diff, axis=1)
    invalid_matches = np.where(match_vals == np.inf)
    match_indices[invalid_matches[0]] = -1
    return NormalCorrespondences(match_indices, source_points, target_points,
                                 source_normals, target_normals)
python
def match(self, source_points, target_points, source_normals, target_normals):
    # compute the distances and inner products between the point sets
    dists = ssd.cdist(source_points, target_points, 'euclidean')
    ip = source_normals.dot(target_normals.T)  # abs because we don't have correct orientations
    source_ip = source_points.dot(target_normals.T)
    target_ip = target_points.dot(target_normals.T)
    target_ip = np.diag(target_ip)
    target_ip = np.tile(target_ip, [source_points.shape[0], 1])
    abs_diff = np.abs(source_ip - target_ip)  # difference in inner products

    # mark invalid correspondences
    invalid_dists = np.where(dists > self.dist_thresh_)
    abs_diff[invalid_dists[0], invalid_dists[1]] = np.inf
    invalid_norms = np.where(ip < self.norm_thresh_)
    abs_diff[invalid_norms[0], invalid_norms[1]] = np.inf

    # choose the closest matches
    match_indices = np.argmin(abs_diff, axis=1)
    match_vals = np.min(abs_diff, axis=1)
    invalid_matches = np.where(match_vals == np.inf)
    match_indices[invalid_matches[0]] = -1
    return NormalCorrespondences(match_indices, source_points, target_points,
                                 source_normals, target_normals)
[ "def", "match", "(", "self", ",", "source_points", ",", "target_points", ",", "source_normals", ",", "target_normals", ")", ":", "# compute the distances and inner products between the point sets", "dists", "=", "ssd", ".", "cdist", "(", "source_points", ",", "target_po...
Matches points between two point-normal sets. Uses the closest ip to choose matches,
        with distance for thresholding only.

        Parameters
        ----------
        source_points : Nx3 :obj:`numpy.ndarray`
            source object points
        target_points : Nx3 :obj:`numpy.ndarray`
            target object points
        source_normals : Nx3 :obj:`numpy.ndarray`
            source object outward-pointing normals
        target_normals : Nx3 :obj:`numpy.ndarray`
            target object outward-pointing normals

        Returns
        -------
        :obj:`Correspondences`
            the correspondences between source and target
[ "Matches", "points", "between", "two", "point", "-", "normal", "sets", ".", "Uses", "the", "closest", "ip", "to", "choose", "matches", "with", "distance", "for", "thresholding", "only", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/feature_matcher.py#L190-L231
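The matching cost is a point-to-plane distance, |n_t . p_s - n_t . p_t|: the source point's offset from the tangent plane at the target point. A toy sketch (values illustrative):

import numpy as np

source_points = np.array([[0.0, 0.0, 0.05], [1.0, 0.0, 0.5]])
target_points = np.array([[0.0, 0.0, 0.0]])
target_normals = np.array([[0.0, 0.0, 1.0]])

source_ip = source_points.dot(target_normals.T)     # n_t . p_s
target_ip = np.tile(np.diag(target_points.dot(target_normals.T)),
                    [source_points.shape[0], 1])    # n_t . p_t
abs_diff = np.abs(source_ip - target_ip)            # plane distances
print(abs_diff)                                     # [[0.05], [0.5]]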
12,294
BerkeleyAutomation/perception
perception/realsense_sensor.py
RealSenseSensor._config_pipe
def _config_pipe(self):
    """Configures the pipeline to stream color and depth.
    """
    self._cfg.enable_device(self.id)

    # configure the color stream
    self._cfg.enable_stream(
        rs.stream.color,
        RealSenseSensor.COLOR_IM_WIDTH,
        RealSenseSensor.COLOR_IM_HEIGHT,
        rs.format.bgr8,
        RealSenseSensor.FPS
    )

    # configure the depth stream
    self._cfg.enable_stream(
        rs.stream.depth,
        RealSenseSensor.DEPTH_IM_WIDTH,
        360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT,
        rs.format.z16,
        RealSenseSensor.FPS
    )
python
def _config_pipe(self):
    self._cfg.enable_device(self.id)

    # configure the color stream
    self._cfg.enable_stream(
        rs.stream.color,
        RealSenseSensor.COLOR_IM_WIDTH,
        RealSenseSensor.COLOR_IM_HEIGHT,
        rs.format.bgr8,
        RealSenseSensor.FPS
    )

    # configure the depth stream
    self._cfg.enable_stream(
        rs.stream.depth,
        RealSenseSensor.DEPTH_IM_WIDTH,
        360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT,
        rs.format.z16,
        RealSenseSensor.FPS
    )
[ "def", "_config_pipe", "(", "self", ")", ":", "self", ".", "_cfg", ".", "enable_device", "(", "self", ".", "id", ")", "# configure the color stream", "self", ".", "_cfg", ".", "enable_stream", "(", "rs", ".", "stream", ".", "color", ",", "RealSenseSensor", ...
Configures the pipeline to stream color and depth.
[ "Configures", "the", "pipeline", "to", "stream", "color", "and", "depth", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/realsense_sensor.py#L79-L100
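The same configuration written directly against pyrealsense2. The serial number, 640x480 resolution, and 30 FPS are illustrative assumptions, not the class constants used above:

import pyrealsense2 as rs

cfg = rs.config()
cfg.enable_device('123456789012')                  # hypothetical serial
cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
pipe = rs.pipeline()
profile = pipe.start(cfg)                          # begins streaming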
12,295
BerkeleyAutomation/perception
perception/realsense_sensor.py
RealSenseSensor._set_depth_scale
def _set_depth_scale(self):
    """Retrieve the scale of the depth sensor.
    """
    sensor = self._profile.get_device().first_depth_sensor()
    self._depth_scale = sensor.get_depth_scale()
python
def _set_depth_scale(self):
    sensor = self._profile.get_device().first_depth_sensor()
    self._depth_scale = sensor.get_depth_scale()
[ "def", "_set_depth_scale", "(", "self", ")", ":", "sensor", "=", "self", ".", "_profile", ".", "get_device", "(", ")", ".", "first_depth_sensor", "(", ")", "self", ".", "_depth_scale", "=", "sensor", ".", "get_depth_scale", "(", ")" ]
Retrieve the scale of the depth sensor.
[ "Retrieve", "the", "scale", "of", "the", "depth", "sensor", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/realsense_sensor.py#L102-L106
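What the scale is for: z16 depth frames hold raw integer units, and multiplying by the scale yields meters. A worked example, assuming a common RealSense scale of 0.001 (the real value comes from the device):

depth_scale = 0.001      # assumed for this sketch
raw_depth = 1523         # uint16 sample from a depth frame
depth_m = raw_depth * depth_scale
print(depth_m)           # 1.523 meters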
12,296
BerkeleyAutomation/perception
perception/realsense_sensor.py
RealSenseSensor._set_intrinsics
def _set_intrinsics(self):
    """Read the intrinsics matrix from the stream.
    """
    strm = self._profile.get_stream(rs.stream.color)
    obj = strm.as_video_stream_profile().get_intrinsics()
    self._intrinsics[0, 0] = obj.fx
    self._intrinsics[1, 1] = obj.fy
    self._intrinsics[0, 2] = obj.ppx
    self._intrinsics[1, 2] = obj.ppy
python
def _set_intrinsics(self):
    strm = self._profile.get_stream(rs.stream.color)
    obj = strm.as_video_stream_profile().get_intrinsics()
    self._intrinsics[0, 0] = obj.fx
    self._intrinsics[1, 1] = obj.fy
    self._intrinsics[0, 2] = obj.ppx
    self._intrinsics[1, 2] = obj.ppy
[ "def", "_set_intrinsics", "(", "self", ")", ":", "strm", "=", "self", ".", "_profile", ".", "get_stream", "(", "rs", ".", "stream", ".", "color", ")", "obj", "=", "strm", ".", "as_video_stream_profile", "(", ")", ".", "get_intrinsics", "(", ")", "self", ...
Read the intrinsics matrix from the stream.
[ "Read", "the", "intrinsics", "matrix", "from", "the", "stream", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/realsense_sensor.py#L108-L116
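Those four assignments fill the standard 3x3 pinhole intrinsics matrix. A sketch of the layout, with illustrative focal lengths and principal point:

import numpy as np

fx, fy, ppx, ppy = 615.0, 615.0, 320.0, 240.0   # illustrative values
K = np.array([[fx, 0.0, ppx],
              [0.0, fy, ppy],
              [0.0, 0.0, 1.0]])
print(K)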
12,297
BerkeleyAutomation/perception
perception/realsense_sensor.py
RealSenseSensor._read_color_and_depth_image
def _read_color_and_depth_image(self):
    """Read a color and depth image from the device.
    """
    frames = self._pipe.wait_for_frames()
    if self._depth_align:
        frames = self._align.process(frames)

    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()

    if not depth_frame or not color_frame:
        logging.warning('Could not retrieve frames.')
        return None, None

    if self._filter_depth:
        depth_frame = self._filter_depth_frame(depth_frame)

    # convert to numpy arrays
    depth_image = self._to_numpy(depth_frame, np.float32)
    color_image = self._to_numpy(color_frame, np.uint8)

    # convert depth to meters
    depth_image *= self._depth_scale

    # bgr to rgb
    color_image = color_image[..., ::-1]

    depth = DepthImage(depth_image, frame=self._frame)
    color = ColorImage(color_image, frame=self._frame)
    return color, depth
python
def _read_color_and_depth_image(self):
    frames = self._pipe.wait_for_frames()
    if self._depth_align:
        frames = self._align.process(frames)

    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()

    if not depth_frame or not color_frame:
        logging.warning('Could not retrieve frames.')
        return None, None

    if self._filter_depth:
        depth_frame = self._filter_depth_frame(depth_frame)

    # convert to numpy arrays
    depth_image = self._to_numpy(depth_frame, np.float32)
    color_image = self._to_numpy(color_frame, np.uint8)

    # convert depth to meters
    depth_image *= self._depth_scale

    # bgr to rgb
    color_image = color_image[..., ::-1]

    depth = DepthImage(depth_image, frame=self._frame)
    color = ColorImage(color_image, frame=self._frame)
    return color, depth
[ "def", "_read_color_and_depth_image", "(", "self", ")", ":", "frames", "=", "self", ".", "_pipe", ".", "wait_for_frames", "(", ")", "if", "self", ".", "_depth_align", ":", "frames", "=", "self", ".", "_align", ".", "process", "(", "frames", ")", "depth_fra...
Read a color and depth image from the device.
[ "Read", "a", "color", "and", "depth", "image", "from", "the", "device", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/realsense_sensor.py#L200-L229
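The color_image[..., ::-1] step simply reverses the channel axis, turning OpenCV-style BGR into RGB. A two-pixel sketch:

import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[..., 0] = 255                 # pure blue in BGR channel order
rgb = bgr[..., ::-1]              # same slice used above
print(rgb[0, 0])                  # [  0   0 255]: blue now in the last slot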
12,298
BerkeleyAutomation/perception
perception/ensenso_sensor.py
EnsensoSensor._set_format
def _set_format(self, msg):
    """ Set the buffer formatting. """
    num_points = msg.height * msg.width
    self._format = '<' + num_points * 'ffff'
python
def _set_format(self, msg):
    num_points = msg.height * msg.width
    self._format = '<' + num_points * 'ffff'
[ "def", "_set_format", "(", "self", ",", "msg", ")", ":", "num_points", "=", "msg", ".", "height", "*", "msg", ".", "width", "self", ".", "_format", "=", "'<'", "+", "num_points", "*", "'ffff'" ]
Set the buffer formatting.
[ "Set", "the", "buffer", "formatting", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/ensenso_sensor.py#L42-L45
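What the format string buys: '<' + N * 'ffff' lets struct unpack N points of four little-endian float32 fields each (the meaning of the fourth field is not shown in this snippet). A sketch for two points:

import struct

num_points = 2
fmt = '<' + num_points * 'ffff'
buf = struct.pack(fmt, 1.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0, 0.0)
print(struct.unpack(fmt, buf))    # (1.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0, 0.0)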
12,299
BerkeleyAutomation/perception
perception/ensenso_sensor.py
EnsensoSensor._set_camera_properties
def _set_camera_properties(self, msg):
    """ Set the camera intrinsics from an info msg. """
    focal_x = msg.K[0]
    focal_y = msg.K[4]
    center_x = msg.K[2]
    center_y = msg.K[5]
    im_height = msg.height
    im_width = msg.width
    self._camera_intr = CameraIntrinsics(self._frame,
                                         focal_x, focal_y,
                                         center_x, center_y,
                                         height=im_height,
                                         width=im_width)
python
def _set_camera_properties(self, msg):
    focal_x = msg.K[0]
    focal_y = msg.K[4]
    center_x = msg.K[2]
    center_y = msg.K[5]
    im_height = msg.height
    im_width = msg.width
    self._camera_intr = CameraIntrinsics(self._frame,
                                         focal_x, focal_y,
                                         center_x, center_y,
                                         height=im_height,
                                         width=im_width)
[ "def", "_set_camera_properties", "(", "self", ",", "msg", ")", ":", "focal_x", "=", "msg", ".", "K", "[", "0", "]", "focal_y", "=", "msg", ".", "K", "[", "4", "]", "center_x", "=", "msg", ".", "K", "[", "2", "]", "center_y", "=", "msg", ".", "K...
Set the camera intrinsics from an info msg.
[ "Set", "the", "camera", "intrinsics", "from", "an", "info", "msg", "." ]
03d9b37dd6b66896cdfe173905c9413c8c3c5df6
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/ensenso_sensor.py#L47-L58
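The K indices above follow the row-major 3x3 layout of a ROS sensor_msgs/CameraInfo intrinsics matrix. A sketch with illustrative values:

K = [615.0, 0.0, 320.0,
     0.0, 615.0, 240.0,
     0.0, 0.0, 1.0]
focal_x, focal_y = K[0], K[4]
center_x, center_y = K[2], K[5]
print(focal_x, focal_y, center_x, center_y)   # 615.0 615.0 320.0 240.0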