Dataset schema (one record per function):
- body: string, 26 to 98.2k characters (full source of the function)
- body_hash: int64 (ranges over roughly the full signed 64-bit space)
- docstring: string, 1 to 16.8k characters
- path: string, 5 to 230 characters (file path within the repository)
- name: string, 1 to 96 characters (function name)
- repository_name: string, 7 to 89 characters
- lang: string, 1 class (all records are "python")
- body_without_docstring: string, 20 to 98.2k characters
@Metadata.property(cache=False, write=False) def trusted(self): '\n    Returns:\n        bool: package trusted\n    ' return self.__trusted
-5,860,853,983,412,719,000
Returns: bool: package trusted
frictionless/package.py
trusted
augusto-herrmann/frictionless-py
python
@Metadata.property(cache=False, write=False) def trusted(self): '\n    Returns:\n        bool: package trusted\n    ' return self.__trusted
@Metadata.property def resources(self): '\n    Returns:\n        Resource[]: package resources\n    ' resources = self.get('resources', []) return self.metadata_attach('resources', resources)
6,241,615,724,240,368,000
Returns: Resource[]: package resources
frictionless/package.py
resources
augusto-herrmann/frictionless-py
python
@Metadata.property def resources(self): '\n    Returns:\n        Resource[]: package resources\n    ' resources = self.get('resources', []) return self.metadata_attach('resources', resources)
@Metadata.property(cache=False, write=False) def resource_names(self): '\n Returns:\n str[]: package resource names\n ' return [resource.name for resource in self.resources]
5,593,440,850,178,711,000
Returns: str[]: package resource names
frictionless/package.py
resource_names
augusto-herrmann/frictionless-py
python
@Metadata.property(cache=False, write=False) def resource_names(self): '\n Returns:\n str[]: package resource names\n ' return [resource.name for resource in self.resources]
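Together, the `resources` and `resource_names` properties above form the read side of the package API. A minimal usage sketch, assuming a local descriptor file exists (the path is hypothetical):

from frictionless import Package

package = Package('datapackage.json')  # hypothetical descriptor path
for resource in package.resources:     # Resource objects attached to the package
    print(resource.name)
print(package.resource_names)          # the same names, as a plain list of str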
def add_resource(self, descriptor): 'Add new resource to package.\n\n Parameters:\n descriptor (dict): resource descriptor\n\n Returns:\n Resource/None: added `Resource` instance or `None` if not added\n ' self.setdefault('resources', []) self['resources'].append(descriptor) return self.resources[(- 1)]
-4,785,949,136,129,564,000
Add new resource to package. Parameters: descriptor (dict): resource descriptor Returns: Resource/None: added `Resource` instance or `None` if not added
frictionless/package.py
add_resource
augusto-herrmann/frictionless-py
python
def add_resource(self, descriptor): 'Add new resource to package.\n\n Parameters:\n descriptor (dict): resource descriptor\n\n Returns:\n Resource/None: added `Resource` instance or `None` if not added\n ' self.setdefault('resources', []) self['resources'].append(descriptor) return self.resources[(- 1)]
def get_resource(self, name): 'Get resource by name.\n\n    Parameters:\n        name (str): resource name\n\n    Raises:\n        FrictionlessException: if resource is not found\n\n    Returns:\n        Resource: `Resource` instance matching the name\n    ' for resource in self.resources: if (resource.name == name): return resource error = errors.PackageError(note=f'resource "{name}" does not exist') raise FrictionlessException(error)
-5,838,373,931,812,160,000
Get resource by name. Parameters: name (str): resource name Raises: FrictionlessException: if resource is not found Returns: Resource: `Resource` instance matching the name
frictionless/package.py
get_resource
augusto-herrmann/frictionless-py
python
def get_resource(self, name): 'Get resource by name.\n\n    Parameters:\n        name (str): resource name\n\n    Raises:\n        FrictionlessException: if resource is not found\n\n    Returns:\n        Resource: `Resource` instance matching the name\n    ' for resource in self.resources: if (resource.name == name): return resource error = errors.PackageError(note=f'resource "{name}" does not exist') raise FrictionlessException(error)
def has_resource(self, name): 'Check if a resource is present\n\n    Parameters:\n        name (str): resource name\n\n    Returns:\n        bool: whether the resource is present\n    ' for resource in self.resources: if (resource.name == name): return True return False
1,565,123,588,314,115,600
Check if a resource is present Parameters: name (str): resource name Returns: bool: whether the resource is present
frictionless/package.py
has_resource
augusto-herrmann/frictionless-py
python
def has_resource(self, name): 'Check if a resource is present\n\n    Parameters:\n        name (str): resource name\n\n    Returns:\n        bool: whether the resource is present\n    ' for resource in self.resources: if (resource.name == name): return True return False
def remove_resource(self, name): 'Remove resource by name.\n\n    Parameters:\n        name (str): resource name\n\n    Raises:\n        FrictionlessException: if resource is not found\n\n    Returns:\n        Resource/None: removed `Resource` instance or `None` if not found\n    ' resource = self.get_resource(name) self.resources.remove(resource) return resource
-8,696,772,322,796,107,000
Remove resource by name. Parameters: name (str): resource name Raises: FrictionlessException: if resource is not found Returns: Resource/None: removed `Resource` instance or `None` if not found
frictionless/package.py
remove_resource
augusto-herrmann/frictionless-py
python
def remove_resource(self, name): 'Remove resource by name.\n\n    Parameters:\n        name (str): resource name\n\n    Raises:\n        FrictionlessException: if resource is not found\n\n    Returns:\n        Resource/None: removed `Resource` instance or `None` if not found\n    ' resource = self.get_resource(name) self.resources.remove(resource) return resource
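The four methods above (`add_resource`, `get_resource`, `has_resource`, `remove_resource`) are the package's resource management surface. A sketch of how they compose, assuming an empty in-memory package and a hypothetical CSV path:

from frictionless import Package

package = Package()
package.add_resource({'name': 'table', 'path': 'table.csv'})  # hypothetical resource descriptor
if package.has_resource('table'):
    resource = package.get_resource('table')
    print(resource.path)
removed = package.remove_resource('table')  # raises FrictionlessException if the name is unknown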
def expand(self): 'Expand metadata\n\n It will add default values to the package.\n ' self.setdefault('resources', self.resources) self.setdefault('profile', self.profile) for resource in self.resources: resource.expand()
5,861,811,078,127,915,000
Expand metadata It will add default values to the package.
frictionless/package.py
expand
augusto-herrmann/frictionless-py
python
def expand(self): 'Expand metadata\n\n It will add default values to the package.\n ' self.setdefault('resources', self.resources) self.setdefault('profile', self.profile) for resource in self.resources: resource.expand()
def infer(self, *, stats=False): "Infer package's attributes\n\n Parameters:\n stats? (bool): stream files completely and infer stats\n " self.setdefault('profile', config.DEFAULT_PACKAGE_PROFILE) for resource in self.resources: resource.infer(stats=stats) if (len(self.resource_names) != len(set(self.resource_names))): seen_names = [] for (index, name) in enumerate(self.resource_names): count = (seen_names.count(name) + 1) if (count > 1): self.resources[index].name = ('%s%s' % (name, count)) seen_names.append(name)
4,752,128,565,801,970,000
Infer package's attributes Parameters: stats? (bool): stream files completely and infer stats
frictionless/package.py
infer
augusto-herrmann/frictionless-py
python
def infer(self, *, stats=False): "Infer package's attributes\n\n Parameters:\n stats? (bool): stream files completely and infer stats\n " self.setdefault('profile', config.DEFAULT_PACKAGE_PROFILE) for resource in self.resources: resource.infer(stats=stats) if (len(self.resource_names) != len(set(self.resource_names))): seen_names = [] for (index, name) in enumerate(self.resource_names): count = (seen_names.count(name) + 1) if (count > 1): self.resources[index].name = ('%s%s' % (name, count)) seen_names.append(name)
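Note the tail of `infer`: when resource names collide, each later duplicate is renamed by appending its occurrence count. The renaming logic in isolation, lifted from the method above:

# deduplication as done at the end of infer(): later duplicates get a numeric suffix
names = ['data', 'data', 'other', 'data']
seen_names = []
for index, name in enumerate(names):
    count = seen_names.count(name) + 1
    if count > 1:
        names[index] = '%s%s' % (name, count)
    seen_names.append(name)
print(names)  # ['data', 'data2', 'other', 'data3']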
def to_copy(self): 'Create a copy of the package' descriptor = self.to_dict() descriptor.pop('resources', None) resources = [] for resource in self.resources: resources.append(resource.to_copy()) return Package(descriptor, resources=resources, basepath=self.__basepath, onerror=self.__onerror, trusted=self.__trusted)
4,082,379,780,980,601,000
Create a copy of the package
frictionless/package.py
to_copy
augusto-herrmann/frictionless-py
python
def to_copy(self): descriptor = self.to_dict() descriptor.pop('resources', None) resources = [] for resource in self.resources: resources.append(resource.to_copy()) return Package(descriptor, resources=resources, basepath=self.__basepath, onerror=self.__onerror, trusted=self.__trusted)
@staticmethod def from_bigquery(source, *, dialect=None): 'Import package from Bigquery\n\n Parameters:\n source (string): BigQuery `Service` object\n dialect (dict): BigQuery dialect\n\n Returns:\n Package: package\n ' storage = system.create_storage('bigquery', source, dialect=dialect) return storage.read_package()
6,416,628,453,939,661,000
Import package from Bigquery Parameters: source (string): BigQuery `Service` object dialect (dict): BigQuery dialect Returns: Package: package
frictionless/package.py
from_bigquery
augusto-herrmann/frictionless-py
python
@staticmethod def from_bigquery(source, *, dialect=None): 'Import package from Bigquery\n\n Parameters:\n source (string): BigQuery `Service` object\n dialect (dict): BigQuery dialect\n\n Returns:\n Package: package\n ' storage = system.create_storage('bigquery', source, dialect=dialect) return storage.read_package()
def to_bigquery(self, target, *, dialect=None): 'Export package to Bigquery\n\n Parameters:\n target (string): BigQuery `Service` object\n dialect (dict): BigQuery dialect\n\n Returns:\n BigqueryStorage: storage\n ' storage = system.create_storage('bigquery', target, dialect=dialect) storage.write_package(self.to_copy(), force=True) return storage
-3,979,124,997,178,724,400
Export package to Bigquery Parameters: target (string): BigQuery `Service` object dialect (dict): BigQuery dialect Returns: BigqueryStorage: storage
frictionless/package.py
to_bigquery
augusto-herrmann/frictionless-py
python
def to_bigquery(self, target, *, dialect=None): 'Export package to Bigquery\n\n Parameters:\n target (string): BigQuery `Service` object\n dialect (dict): BigQuery dialect\n\n Returns:\n BigqueryStorage: storage\n ' storage = system.create_storage('bigquery', target, dialect=dialect) storage.write_package(self.to_copy(), force=True) return storage
@staticmethod def from_ckan(source, *, dialect=None): 'Import package from CKAN\n\n Parameters:\n source (string): CKAN instance url e.g. "https://demo.ckan.org"\n dialect (dict): CKAN dialect\n\n Returns:\n Package: package\n ' storage = system.create_storage('ckan', source, dialect=dialect) return storage.read_package()
-507,925,738,641,212,350
Import package from CKAN Parameters: source (string): CKAN instance url e.g. "https://demo.ckan.org" dialect (dict): CKAN dialect Returns: Package: package
frictionless/package.py
from_ckan
augusto-herrmann/frictionless-py
python
@staticmethod def from_ckan(source, *, dialect=None): 'Import package from CKAN\n\n Parameters:\n source (string): CKAN instance url e.g. "https://demo.ckan.org"\n dialect (dict): CKAN dialect\n\n Returns:\n Package: package\n ' storage = system.create_storage('ckan', source, dialect=dialect) return storage.read_package()
def to_ckan(self, target, *, dialect=None): 'Export package to CKAN\n\n Parameters:\n target (string): CKAN instance url e.g. "https://demo.ckan.org"\n dialect (dict): CKAN dialect\n\n Returns:\n CkanStorage: storage\n ' storage = system.create_storage('ckan', target, dialect=dialect) storage.write_package(self.to_copy(), force=True) return storage
208,811,089,386,863,700
Export package to CKAN Parameters: target (string): CKAN instance url e.g. "https://demo.ckan.org" dialect (dict): CKAN dialect Returns: CkanStorage: storage
frictionless/package.py
to_ckan
augusto-herrmann/frictionless-py
python
def to_ckan(self, target, *, dialect=None): 'Export package to CKAN\n\n Parameters:\n target (string): CKAN instance url e.g. "https://demo.ckan.org"\n dialect (dict): CKAN dialect\n\n Returns:\n CkanStorage: storage\n ' storage = system.create_storage('ckan', target, dialect=dialect) storage.write_package(self.to_copy(), force=True) return storage
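A hedged sketch of the CKAN round trip described by `from_ckan`/`to_ckan`; the demo instance URL works for reading public data, writing would additionally need credentials, and the dialect keys shown are assumptions rather than a documented contract:

from frictionless import Package

package = Package.from_ckan('https://demo.ckan.org', dialect={'dataset': 'example'})  # hypothetical dialect keys
storage = package.to_ckan('https://demo.ckan.org', dialect={'dataset': 'example-copy'})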
@staticmethod def from_sql(source, *, dialect=None): 'Import package from SQL\n\n    Parameters:\n        source (any): SQL connection string or engine\n        dialect (dict): SQL dialect\n\n    Returns:\n        Package: package\n    ' storage = system.create_storage('sql', source, dialect=dialect) return storage.read_package()
4,723,093,938,729,500,000
Import package from SQL Parameters: source (any): SQL connection string or engine dialect (dict): SQL dialect Returns: Package: package
frictionless/package.py
from_sql
augusto-herrmann/frictionless-py
python
@staticmethod def from_sql(source, *, dialect=None): 'Import package from SQL\n\n    Parameters:\n        source (any): SQL connection string or engine\n        dialect (dict): SQL dialect\n\n    Returns:\n        Package: package\n    ' storage = system.create_storage('sql', source, dialect=dialect) return storage.read_package()
def to_sql(self, target, *, dialect=None): 'Export package to SQL\n\n    Parameters:\n        target (any): SQL connection string or engine\n        dialect (dict): SQL dialect\n\n    Returns:\n        SqlStorage: storage\n    ' storage = system.create_storage('sql', target, dialect=dialect) storage.write_package(self.to_copy(), force=True) return storage
-6,703,690,496,253,987,000
Export package to SQL Parameters: target (any): SQL connection string or engine dialect (dict): SQL dialect Returns: SqlStorage: storage
frictionless/package.py
to_sql
augusto-herrmann/frictionless-py
python
def to_sql(self, target, *, dialect=None): 'Export package to SQL\n\n    Parameters:\n        target (any): SQL connection string or engine\n        dialect (dict): SQL dialect\n\n    Returns:\n        SqlStorage: storage\n    ' storage = system.create_storage('sql', target, dialect=dialect) storage.write_package(self.to_copy(), force=True) return storage
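Of the storage pairs, SQL is the easiest to try locally because the target can be a SQLite URL. A round-trip sketch under that assumption (descriptor path hypothetical):

from frictionless import Package

package = Package('datapackage.json')             # hypothetical descriptor
storage = package.to_sql('sqlite:///package.db')  # one table per resource
restored = Package.from_sql('sqlite:///package.db')
print(restored.resource_names)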
@staticmethod def from_zip(path, **options): 'Create a package from ZIP\n\n    Parameters:\n        path(str): file path\n        **options(dict): resource options\n    ' return Package(descriptor=path, **options)
6,982,609,240,657,718,000
Create a package from ZIP Parameters: path(str): file path **options(dict): resource options
frictionless/package.py
from_zip
augusto-herrmann/frictionless-py
python
@staticmethod def from_zip(path, **options): 'Create a package from ZIP\n\n    Parameters:\n        path(str): file path\n        **options(dict): resource options\n    ' return Package(descriptor=path, **options)
def to_zip(self, path, *, encoder_class=None): 'Save package to a zip\n\n    Parameters:\n        path (str): target path\n        encoder_class (object): json encoder class\n\n    Raises:\n        FrictionlessException: on any error\n    ' try: with zipfile.ZipFile(path, 'w') as archive: package_descriptor = self.to_dict() for (index, resource) in enumerate(self.resources): descriptor = package_descriptor['resources'][index] if resource.remote: pass elif resource.memory: if (not isinstance(resource.data, list)): path = f'{resource.name}.csv' descriptor['path'] = path del descriptor['data'] with tempfile.NamedTemporaryFile() as file: tgt = Resource(path=file.name, format='csv', trusted=True) resource.write(tgt) archive.write(file.name, path) elif resource.multipart: for (path, fullpath) in zip(resource.path, resource.fullpath): if os.path.isfile(fullpath): if (not helpers.is_safe_path(fullpath)): note = f'Zipping unsafe "{fullpath}" is not supported' error = errors.PackageError(note=note) raise FrictionlessException(error) archive.write(fullpath, path) else: path = resource.path fullpath = resource.fullpath if os.path.isfile(fullpath): if (not helpers.is_safe_path(fullpath)): note = f'Zipping unsafe "{fullpath}" is not supported' error = errors.PackageError(note=note) raise FrictionlessException(error) archive.write(fullpath, path) archive.writestr('datapackage.json', json.dumps(package_descriptor, indent=2, ensure_ascii=False, cls=encoder_class)) except Exception as exception: error = errors.PackageError(note=str(exception)) raise FrictionlessException(error) from exception
-2,265,819,970,152,377,600
Save package to a zip Parameters: path (str): target path encoder_class (object): json encoder class Raises: FrictionlessException: on any error
frictionless/package.py
to_zip
augusto-herrmann/frictionless-py
python
def to_zip(self, path, *, encoder_class=None): 'Save package to a zip\n\n    Parameters:\n        path (str): target path\n        encoder_class (object): json encoder class\n\n    Raises:\n        FrictionlessException: on any error\n    ' try: with zipfile.ZipFile(path, 'w') as archive: package_descriptor = self.to_dict() for (index, resource) in enumerate(self.resources): descriptor = package_descriptor['resources'][index] if resource.remote: pass elif resource.memory: if (not isinstance(resource.data, list)): path = f'{resource.name}.csv' descriptor['path'] = path del descriptor['data'] with tempfile.NamedTemporaryFile() as file: tgt = Resource(path=file.name, format='csv', trusted=True) resource.write(tgt) archive.write(file.name, path) elif resource.multipart: for (path, fullpath) in zip(resource.path, resource.fullpath): if os.path.isfile(fullpath): if (not helpers.is_safe_path(fullpath)): note = f'Zipping unsafe "{fullpath}" is not supported' error = errors.PackageError(note=note) raise FrictionlessException(error) archive.write(fullpath, path) else: path = resource.path fullpath = resource.fullpath if os.path.isfile(fullpath): if (not helpers.is_safe_path(fullpath)): note = f'Zipping unsafe "{fullpath}" is not supported' error = errors.PackageError(note=note) raise FrictionlessException(error) archive.write(fullpath, path) archive.writestr('datapackage.json', json.dumps(package_descriptor, indent=2, ensure_ascii=False, cls=encoder_class)) except Exception as exception: error = errors.PackageError(note=str(exception)) raise FrictionlessException(error) from exception
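`to_zip` writes `datapackage.json` plus every local resource file into a single archive, and `from_zip` simply hands the ZIP path to the `Package` constructor. A round-trip sketch (paths hypothetical):

from frictionless import Package

package = Package('datapackage.json')  # hypothetical descriptor
package.to_zip('package.zip')
restored = Package.from_zip('package.zip')
print(restored.resource_names)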
def params(self, **kwargs): "\n Specify query params to be used when executing the search. All the\n keyword arguments will override the current values. See\n https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search\n for all available parameters.\n\n Example::\n\n s = Search()\n s = s.params(routing='user-1', preference='local')\n " s = self._clone() s._params.update(kwargs) return s
984,118,992,187,226,400
Specify query params to be used when executing the search. All the keyword arguments will override the current values. See https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search for all available parameters. Example:: s = Search() s = s.params(routing='user-1', preference='local')
elasticsearch_dsl/search.py
params
cfpb/elasticsearch-dsl-py
python
def params(self, **kwargs): "\n Specify query params to be used when executing the search. All the\n keyword arguments will override the current values. See\n https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search\n for all available parameters.\n\n Example::\n\n s = Search()\n s = s.params(routing='user-1', preference='local')\n " s = self._clone() s._params.update(kwargs) return s
def index(self, *index): "\n    Set the index for the search. If called with no arguments it will remove all index information.\n\n    Example:\n\n        s = Search()\n        s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')\n        s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])\n    " s = self._clone() if (not index): s._index = None else: indexes = [] for i in index: if isinstance(i, string_types): indexes.append(i) elif isinstance(i, list): indexes += i elif isinstance(i, tuple): indexes += list(i) s._index = ((self._index or []) + indexes) return s
27,993,767,929,939,330
Set the index for the search. If called with no arguments it will remove all index information. Example: s = Search() s = s.index('twitter-2015.01.01', 'twitter-2015.01.02') s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
elasticsearch_dsl/search.py
index
cfpb/elasticsearch-dsl-py
python
def index(self, *index): "\n    Set the index for the search. If called with no arguments it will remove all index information.\n\n    Example:\n\n        s = Search()\n        s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')\n        s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])\n    " s = self._clone() if (not index): s._index = None else: indexes = [] for i in index: if isinstance(i, string_types): indexes.append(i) elif isinstance(i, list): indexes += i elif isinstance(i, tuple): indexes += list(i) s._index = ((self._index or []) + indexes) return s
def doc_type(self, *doc_type, **kwargs): "\n Set the type to search through. You can supply a single value or\n multiple. Values can be strings or subclasses of ``Document``.\n\n You can also pass in any keyword arguments, mapping a doc_type to a\n callback that should be used instead of the Hit class.\n\n If no doc_type is supplied any information stored on the instance will\n be erased.\n\n Example:\n\n s = Search().doc_type('product', 'store', User, custom=my_callback)\n " s = self._clone() if ((not doc_type) and (not kwargs)): s._doc_type = [] s._doc_type_map = {} else: s._doc_type.extend(doc_type) s._doc_type.extend(kwargs.keys()) s._doc_type_map.update(kwargs) return s
-2,262,673,354,933,794,300
Set the type to search through. You can supply a single value or multiple. Values can be strings or subclasses of ``Document``. You can also pass in any keyword arguments, mapping a doc_type to a callback that should be used instead of the Hit class. If no doc_type is supplied any information stored on the instance will be erased. Example: s = Search().doc_type('product', 'store', User, custom=my_callback)
elasticsearch_dsl/search.py
doc_type
cfpb/elasticsearch-dsl-py
python
def doc_type(self, *doc_type, **kwargs): "\n Set the type to search through. You can supply a single value or\n multiple. Values can be strings or subclasses of ``Document``.\n\n You can also pass in any keyword arguments, mapping a doc_type to a\n callback that should be used instead of the Hit class.\n\n If no doc_type is supplied any information stored on the instance will\n be erased.\n\n Example:\n\n s = Search().doc_type('product', 'store', User, custom=my_callback)\n " s = self._clone() if ((not doc_type) and (not kwargs)): s._doc_type = [] s._doc_type_map = {} else: s._doc_type.extend(doc_type) s._doc_type.extend(kwargs.keys()) s._doc_type_map.update(kwargs) return s
def using(self, client): '\n Associate the search request with an elasticsearch client. A fresh copy\n will be returned with current instance remaining unchanged.\n\n :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or\n an alias to look up in ``elasticsearch_dsl.connections``\n\n ' s = self._clone() s._using = client return s
-2,617,029,962,027,293,700
Associate the search request with an elasticsearch client. A fresh copy will be returned with current instance remaining unchanged. :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or an alias to look up in ``elasticsearch_dsl.connections``
elasticsearch_dsl/search.py
using
cfpb/elasticsearch-dsl-py
python
def using(self, client): '\n Associate the search request with an elasticsearch client. A fresh copy\n will be returned with current instance remaining unchanged.\n\n :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or\n an alias to look up in ``elasticsearch_dsl.connections``\n\n ' s = self._clone() s._using = client return s
def extra(self, **kwargs): '\n Add extra keys to the request body. Mostly here for backwards\n compatibility.\n ' s = self._clone() if ('from_' in kwargs): kwargs['from'] = kwargs.pop('from_') s._extra.update(kwargs) return s
2,489,825,865,943,227,000
Add extra keys to the request body. Mostly here for backwards compatibility.
elasticsearch_dsl/search.py
extra
cfpb/elasticsearch-dsl-py
python
def extra(self, **kwargs): '\n Add extra keys to the request body. Mostly here for backwards\n compatibility.\n ' s = self._clone() if ('from_' in kwargs): kwargs['from'] = kwargs.pop('from_') s._extra.update(kwargs) return s
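`extra` is also where the reserved-word collision is handled: `from` is a Python keyword, so callers pass `from_` and the method renames the key. A quick illustration (the extra keys are arbitrary examples):

from elasticsearch_dsl import Search

s = Search().extra(from_=10, size=25, track_total_hits=True)
print(s.to_dict())  # -> {'from': 10, 'size': 25, 'track_total_hits': True} (key order may differ)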
def __init__(self, **kwargs): '\n    Search request to elasticsearch.\n\n    :arg using: `Elasticsearch` instance to use\n    :arg index: limit the search to index\n    :arg doc_type: only query this type.\n\n    All the parameters supplied (or omitted) at creation time can be later\n    overridden by methods (`using`, `index` and `doc_type` respectively).\n    ' super(Search, self).__init__(**kwargs) self.aggs = AggsProxy(self) self._sort = [] self._source = None self._highlight = {} self._highlight_opts = {} self._suggest = {} self._script_fields = {} self._response_class = Response self._query_proxy = QueryProxy(self, 'query') self._post_filter_proxy = QueryProxy(self, 'post_filter')
5,466,510,326,525,728,000
Search request to elasticsearch. :arg using: `Elasticsearch` instance to use :arg index: limit the search to index :arg doc_type: only query this type. All the parameters supplied (or omitted) at creation time can be later overridden by methods (`using`, `index` and `doc_type` respectively).
elasticsearch_dsl/search.py
__init__
cfpb/elasticsearch-dsl-py
python
def __init__(self, **kwargs): '\n    Search request to elasticsearch.\n\n    :arg using: `Elasticsearch` instance to use\n    :arg index: limit the search to index\n    :arg doc_type: only query this type.\n\n    All the parameters supplied (or omitted) at creation time can be later\n    overridden by methods (`using`, `index` and `doc_type` respectively).\n    ' super(Search, self).__init__(**kwargs) self.aggs = AggsProxy(self) self._sort = [] self._source = None self._highlight = {} self._highlight_opts = {} self._suggest = {} self._script_fields = {} self._response_class = Response self._query_proxy = QueryProxy(self, 'query') self._post_filter_proxy = QueryProxy(self, 'post_filter')
def __iter__(self): '\n Iterate over the hits.\n ' return iter(self.execute())
8,854,762,045,459,427,000
Iterate over the hits.
elasticsearch_dsl/search.py
__iter__
cfpb/elasticsearch-dsl-py
python
def __iter__(self): '\n \n ' return iter(self.execute())
def __getitem__(self, n): '\n Support slicing the `Search` instance for pagination.\n\n Slicing equates to the from/size parameters. E.g.::\n\n s = Search().query(...)[0:25]\n\n is equivalent to::\n\n s = Search().query(...).extra(from_=0, size=25)\n\n ' s = self._clone() if isinstance(n, slice): if ((n.start and (n.start < 0)) or (n.stop and (n.stop < 0))): raise ValueError('Search does not support negative slicing.') s._extra['from'] = (n.start or 0) s._extra['size'] = max(0, ((n.stop - (n.start or 0)) if (n.stop is not None) else 10)) return s else: if (n < 0): raise ValueError('Search does not support negative indexing.') s._extra['from'] = n s._extra['size'] = 1 return s
5,948,883,159,155,191,000
Support slicing the `Search` instance for pagination. Slicing equates to the from/size parameters. E.g.:: s = Search().query(...)[0:25] is equivalent to:: s = Search().query(...).extra(from_=0, size=25)
elasticsearch_dsl/search.py
__getitem__
cfpb/elasticsearch-dsl-py
python
def __getitem__(self, n): '\n Support slicing the `Search` instance for pagination.\n\n Slicing equates to the from/size parameters. E.g.::\n\n s = Search().query(...)[0:25]\n\n is equivalent to::\n\n s = Search().query(...).extra(from_=0, size=25)\n\n ' s = self._clone() if isinstance(n, slice): if ((n.start and (n.start < 0)) or (n.stop and (n.stop < 0))): raise ValueError('Search does not support negative slicing.') s._extra['from'] = (n.start or 0) s._extra['size'] = max(0, ((n.stop - (n.start or 0)) if (n.stop is not None) else 10)) return s else: if (n < 0): raise ValueError('Search does not support negative indexing.') s._extra['from'] = n s._extra['size'] = 1 return s
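The slicing above maps straight onto `from`/`size`, so a slice and the equivalent `extra` call build identical request bodies; a small check of that equivalence:

from elasticsearch_dsl import Search

page = Search()[20:30]                   # items 20..29
same = Search().extra(from_=20, size=10)
assert page.to_dict() == same.to_dict()  # both {'from': 20, 'size': 10}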
@classmethod def from_dict(cls, d): '\n Construct a new `Search` instance from a raw dict containing the search\n body. Useful when migrating from raw dictionaries.\n\n Example::\n\n s = Search.from_dict({\n "query": {\n "bool": {\n "must": [...]\n }\n },\n "aggs": {...}\n })\n s = s.filter(\'term\', published=True)\n ' s = cls() s.update_from_dict(d) return s
-7,138,592,016,995,946,000
Construct a new `Search` instance from a raw dict containing the search body. Useful when migrating from raw dictionaries. Example:: s = Search.from_dict({ "query": { "bool": { "must": [...] } }, "aggs": {...} }) s = s.filter('term', published=True)
elasticsearch_dsl/search.py
from_dict
cfpb/elasticsearch-dsl-py
python
@classmethod def from_dict(cls, d): '\n Construct a new `Search` instance from a raw dict containing the search\n body. Useful when migrating from raw dictionaries.\n\n Example::\n\n s = Search.from_dict({\n "query": {\n "bool": {\n "must": [...]\n }\n },\n "aggs": {...}\n })\n s = s.filter(\'term\', published=True)\n ' s = cls() s.update_from_dict(d) return s
def _clone(self): '\n Return a clone of the current search request. Performs a shallow copy\n of all the underlying objects. Used internally by most state modifying\n APIs.\n ' s = super(Search, self)._clone() s._response_class = self._response_class s._sort = self._sort[:] s._source = (copy.copy(self._source) if (self._source is not None) else None) s._highlight = self._highlight.copy() s._highlight_opts = self._highlight_opts.copy() s._suggest = self._suggest.copy() s._script_fields = self._script_fields.copy() for x in ('query', 'post_filter'): getattr(s, x)._proxied = getattr(self, x)._proxied if self.aggs._params.get('aggs'): s.aggs._params = {'aggs': self.aggs._params['aggs'].copy()} return s
-6,639,115,294,444,079,000
Return a clone of the current search request. Performs a shallow copy of all the underlying objects. Used internally by most state modifying APIs.
elasticsearch_dsl/search.py
_clone
cfpb/elasticsearch-dsl-py
python
def _clone(self): '\n Return a clone of the current search request. Performs a shallow copy\n of all the underlying objects. Used internally by most state modifying\n APIs.\n ' s = super(Search, self)._clone() s._response_class = self._response_class s._sort = self._sort[:] s._source = (copy.copy(self._source) if (self._source is not None) else None) s._highlight = self._highlight.copy() s._highlight_opts = self._highlight_opts.copy() s._suggest = self._suggest.copy() s._script_fields = self._script_fields.copy() for x in ('query', 'post_filter'): getattr(s, x)._proxied = getattr(self, x)._proxied if self.aggs._params.get('aggs'): s.aggs._params = {'aggs': self.aggs._params['aggs'].copy()} return s
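Because every state-modifying method funnels through `_clone`, `Search` behaves as an immutable builder: each call returns a fresh copy and leaves the original untouched. For instance (field name hypothetical):

from elasticsearch_dsl import Search

base = Search()
by_date = base.sort('-timestamp')  # hypothetical field
assert base.to_dict() == {}        # the original search is unchanged
assert by_date.to_dict() == {'sort': [{'timestamp': {'order': 'desc'}}]}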
def response_class(self, cls): '\n Override the default wrapper used for the response.\n ' s = self._clone() s._response_class = cls return s
-3,916,456,081,902,850,000
Override the default wrapper used for the response.
elasticsearch_dsl/search.py
response_class
cfpb/elasticsearch-dsl-py
python
def response_class(self, cls): '\n \n ' s = self._clone() s._response_class = cls return s
def update_from_dict(self, d): '\n Apply options from a serialized body to the current instance. Modifies\n the object in-place. Used mostly by ``from_dict``.\n ' d = d.copy() if ('query' in d): self.query._proxied = Q(d.pop('query')) if ('post_filter' in d): self.post_filter._proxied = Q(d.pop('post_filter')) aggs = d.pop('aggs', d.pop('aggregations', {})) if aggs: self.aggs._params = {'aggs': {name: A(value) for (name, value) in iteritems(aggs)}} if ('sort' in d): self._sort = d.pop('sort') if ('_source' in d): self._source = d.pop('_source') if ('highlight' in d): high = d.pop('highlight').copy() self._highlight = high.pop('fields') self._highlight_opts = high if ('suggest' in d): self._suggest = d.pop('suggest') if ('text' in self._suggest): text = self._suggest.pop('text') for s in self._suggest.values(): s.setdefault('text', text) if ('script_fields' in d): self._script_fields = d.pop('script_fields') self._extra.update(d) return self
-3,957,944,877,918,818,000
Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``.
elasticsearch_dsl/search.py
update_from_dict
cfpb/elasticsearch-dsl-py
python
def update_from_dict(self, d): '\n Apply options from a serialized body to the current instance. Modifies\n the object in-place. Used mostly by ``from_dict``.\n ' d = d.copy() if ('query' in d): self.query._proxied = Q(d.pop('query')) if ('post_filter' in d): self.post_filter._proxied = Q(d.pop('post_filter')) aggs = d.pop('aggs', d.pop('aggregations', {})) if aggs: self.aggs._params = {'aggs': {name: A(value) for (name, value) in iteritems(aggs)}} if ('sort' in d): self._sort = d.pop('sort') if ('_source' in d): self._source = d.pop('_source') if ('highlight' in d): high = d.pop('highlight').copy() self._highlight = high.pop('fields') self._highlight_opts = high if ('suggest' in d): self._suggest = d.pop('suggest') if ('text' in self._suggest): text = self._suggest.pop('text') for s in self._suggest.values(): s.setdefault('text', text) if ('script_fields' in d): self._script_fields = d.pop('script_fields') self._extra.update(d) return self
def script_fields(self, **kwargs): '\n Define script fields to be calculated on hits. See\n https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html\n for more details.\n\n Example::\n\n s = Search()\n s = s.script_fields(times_two="doc[\'field\'].value * 2")\n s = s.script_fields(\n times_three={\n \'script\': {\n \'inline\': "doc[\'field\'].value * params.n",\n \'params\': {\'n\': 3}\n }\n }\n )\n\n ' s = self._clone() for name in kwargs: if isinstance(kwargs[name], string_types): kwargs[name] = {'script': kwargs[name]} s._script_fields.update(kwargs) return s
2,069,877,232,227,935,000
Define script fields to be calculated on hits. See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html for more details. Example:: s = Search() s = s.script_fields(times_two="doc['field'].value * 2") s = s.script_fields( times_three={ 'script': { 'inline': "doc['field'].value * params.n", 'params': {'n': 3} } } )
elasticsearch_dsl/search.py
script_fields
cfpb/elasticsearch-dsl-py
python
def script_fields(self, **kwargs): '\n Define script fields to be calculated on hits. See\n https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html\n for more details.\n\n Example::\n\n s = Search()\n s = s.script_fields(times_two="doc[\'field\'].value * 2")\n s = s.script_fields(\n times_three={\n \'script\': {\n \'inline\': "doc[\'field\'].value * params.n",\n \'params\': {\'n\': 3}\n }\n }\n )\n\n ' s = self._clone() for name in kwargs: if isinstance(kwargs[name], string_types): kwargs[name] = {'script': kwargs[name]} s._script_fields.update(kwargs) return s
def source(self, fields=None, **kwargs): '\n Selectively control how the _source field is returned.\n\n :arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes\n\n If ``fields`` is None, the entire document will be returned for\n each hit. If fields is a dictionary with keys of \'includes\' and/or\n \'excludes\' the fields will be either included or excluded appropriately.\n\n Calling this multiple times with the same named parameter will override the\n previous values with the new ones.\n\n Example::\n\n s = Search()\n s = s.source(includes=[\'obj1.*\'], excludes=["*.description"])\n\n s = Search()\n s = s.source(includes=[\'obj1.*\']).source(excludes=["*.description"])\n\n ' s = self._clone() if (fields and kwargs): raise ValueError('You cannot specify fields and kwargs at the same time.') if (fields is not None): s._source = fields return s if (kwargs and (not isinstance(s._source, dict))): s._source = {} for (key, value) in kwargs.items(): if (value is None): try: del s._source[key] except KeyError: pass else: s._source[key] = value return s
-8,599,830,302,626,372,000
Selectively control how the _source field is returned. :arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes If ``fields`` is None, the entire document will be returned for each hit. If fields is a dictionary with keys of 'includes' and/or 'excludes' the fields will be either included or excluded appropriately. Calling this multiple times with the same named parameter will override the previous values with the new ones. Example:: s = Search() s = s.source(includes=['obj1.*'], excludes=["*.description"]) s = Search() s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
elasticsearch_dsl/search.py
source
cfpb/elasticsearch-dsl-py
python
def source(self, fields=None, **kwargs): '\n Selectively control how the _source field is returned.\n\n :arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes\n\n If ``fields`` is None, the entire document will be returned for\n each hit. If fields is a dictionary with keys of \'includes\' and/or\n \'excludes\' the fields will be either included or excluded appropriately.\n\n Calling this multiple times with the same named parameter will override the\n previous values with the new ones.\n\n Example::\n\n s = Search()\n s = s.source(includes=[\'obj1.*\'], excludes=["*.description"])\n\n s = Search()\n s = s.source(includes=[\'obj1.*\']).source(excludes=["*.description"])\n\n ' s = self._clone() if (fields and kwargs): raise ValueError('You cannot specify fields and kwargs at the same time.') if (fields is not None): s._source = fields return s if (kwargs and (not isinstance(s._source, dict))): s._source = {} for (key, value) in kwargs.items(): if (value is None): try: del s._source[key] except KeyError: pass else: s._source[key] = value return s
def sort(self, *keys): '\n Add sorting information to the search request. If called without\n arguments it will remove all sort requirements. Otherwise it will\n replace them. Acceptable arguments are::\n\n \'some.field\'\n \'-some.other.field\'\n {\'different.field\': {\'any\': \'dict\'}}\n\n so for example::\n\n s = Search().sort(\n \'category\',\n \'-title\',\n {"price" : {"order" : "asc", "mode" : "avg"}}\n )\n\n will sort by ``category``, ``title`` (in descending order) and\n ``price`` in ascending order using the ``avg`` mode.\n\n The API returns a copy of the Search object and can thus be chained.\n ' s = self._clone() s._sort = [] for k in keys: if (isinstance(k, string_types) and k.startswith('-')): if (k[1:] == '_score'): raise IllegalOperation('Sorting by `-_score` is not allowed.') k = {k[1:]: {'order': 'desc'}} s._sort.append(k) return s
-5,992,966,046,639,226,000
Add sorting information to the search request. If called without arguments it will remove all sort requirements. Otherwise it will replace them. Acceptable arguments are:: 'some.field' '-some.other.field' {'different.field': {'any': 'dict'}} so for example:: s = Search().sort( 'category', '-title', {"price" : {"order" : "asc", "mode" : "avg"}} ) will sort by ``category``, ``title`` (in descending order) and ``price`` in ascending order using the ``avg`` mode. The API returns a copy of the Search object and can thus be chained.
elasticsearch_dsl/search.py
sort
cfpb/elasticsearch-dsl-py
python
def sort(self, *keys): '\n Add sorting information to the search request. If called without\n arguments it will remove all sort requirements. Otherwise it will\n replace them. Acceptable arguments are::\n\n \'some.field\'\n \'-some.other.field\'\n {\'different.field\': {\'any\': \'dict\'}}\n\n so for example::\n\n s = Search().sort(\n \'category\',\n \'-title\',\n {"price" : {"order" : "asc", "mode" : "avg"}}\n )\n\n will sort by ``category``, ``title`` (in descending order) and\n ``price`` in ascending order using the ``avg`` mode.\n\n The API returns a copy of the Search object and can thus be chained.\n ' s = self._clone() s._sort = [] for k in keys: if (isinstance(k, string_types) and k.startswith('-')): if (k[1:] == '_score'): raise IllegalOperation('Sorting by `-_score` is not allowed.') k = {k[1:]: {'order': 'desc'}} s._sort.append(k) return s
def highlight_options(self, **kwargs): "\n Update the global highlighting options used for this request. For\n example::\n\n s = Search()\n s = s.highlight_options(order='score')\n " s = self._clone() s._highlight_opts.update(kwargs) return s
6,445,549,317,436,081,000
Update the global highlighting options used for this request. For example:: s = Search() s = s.highlight_options(order='score')
elasticsearch_dsl/search.py
highlight_options
cfpb/elasticsearch-dsl-py
python
def highlight_options(self, **kwargs): "\n Update the global highlighting options used for this request. For\n example::\n\n s = Search()\n s = s.highlight_options(order='score')\n " s = self._clone() s._highlight_opts.update(kwargs) return s
def highlight(self, *fields, **kwargs): '\n Request highlighting of some fields. All keyword arguments passed in will be\n used as parameters for all the fields in the ``fields`` parameter. Example::\n\n Search().highlight(\'title\', \'body\', fragment_size=50)\n\n will produce the equivalent of::\n\n {\n "highlight": {\n "fields": {\n "body": {"fragment_size": 50},\n "title": {"fragment_size": 50}\n }\n }\n }\n\n If you want to have different options for different fields\n you can call ``highlight`` twice::\n\n Search().highlight(\'title\', fragment_size=50).highlight(\'body\', fragment_size=100)\n\n which will produce::\n\n {\n "highlight": {\n "fields": {\n "body": {"fragment_size": 100},\n "title": {"fragment_size": 50}\n }\n }\n }\n\n ' s = self._clone() for f in fields: s._highlight[f] = kwargs return s
7,883,578,960,692,520,000
Request highlighting of some fields. All keyword arguments passed in will be used as parameters for all the fields in the ``fields`` parameter. Example:: Search().highlight('title', 'body', fragment_size=50) will produce the equivalent of:: { "highlight": { "fields": { "body": {"fragment_size": 50}, "title": {"fragment_size": 50} } } } If you want to have different options for different fields you can call ``highlight`` twice:: Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100) which will produce:: { "highlight": { "fields": { "body": {"fragment_size": 100}, "title": {"fragment_size": 50} } } }
elasticsearch_dsl/search.py
highlight
cfpb/elasticsearch-dsl-py
python
def highlight(self, *fields, **kwargs): '\n Request highlighting of some fields. All keyword arguments passed in will be\n used as parameters for all the fields in the ``fields`` parameter. Example::\n\n Search().highlight(\'title\', \'body\', fragment_size=50)\n\n will produce the equivalent of::\n\n {\n "highlight": {\n "fields": {\n "body": {"fragment_size": 50},\n "title": {"fragment_size": 50}\n }\n }\n }\n\n If you want to have different options for different fields\n you can call ``highlight`` twice::\n\n Search().highlight(\'title\', fragment_size=50).highlight(\'body\', fragment_size=100)\n\n which will produce::\n\n {\n "highlight": {\n "fields": {\n "body": {"fragment_size": 100},\n "title": {"fragment_size": 50}\n }\n }\n }\n\n ' s = self._clone() for f in fields: s._highlight[f] = kwargs return s
def suggest(self, name, text, **kwargs): "\n Add a suggestions request to the search.\n\n :arg name: name of the suggestion\n :arg text: text to suggest on\n\n All keyword arguments will be added to the suggestions body. For example::\n\n s = Search()\n s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})\n " s = self._clone() s._suggest[name] = {'text': text} s._suggest[name].update(kwargs) return s
3,912,557,051,867,161,000
Add a suggestions request to the search. :arg name: name of the suggestion :arg text: text to suggest on All keyword arguments will be added to the suggestions body. For example:: s = Search() s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
elasticsearch_dsl/search.py
suggest
cfpb/elasticsearch-dsl-py
python
def suggest(self, name, text, **kwargs): "\n Add a suggestions request to the search.\n\n :arg name: name of the suggestion\n :arg text: text to suggest on\n\n All keyword arguments will be added to the suggestions body. For example::\n\n s = Search()\n s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})\n " s = self._clone() s._suggest[name] = {'text': text} s._suggest[name].update(kwargs) return s
def to_dict(self, count=False, **kwargs): "\n Serialize the search into the dictionary that will be sent over as the\n request's body.\n\n :arg count: a flag to specify if we are interested in a body for count -\n no aggregations, no pagination bounds etc.\n\n All additional keyword arguments will be included into the dictionary.\n " d = {} if self.query: d['query'] = self.query.to_dict() if (not count): if self.post_filter: d['post_filter'] = self.post_filter.to_dict() if self.aggs.aggs: d.update(self.aggs.to_dict()) if self._sort: d['sort'] = self._sort d.update(self._extra) if (self._source not in (None, {})): d['_source'] = self._source if self._highlight: d['highlight'] = {'fields': self._highlight} d['highlight'].update(self._highlight_opts) if self._suggest: d['suggest'] = self._suggest if self._script_fields: d['script_fields'] = self._script_fields d.update(kwargs) return d
-5,094,944,635,325,877,000
Serialize the search into the dictionary that will be sent over as the request's body. :arg count: a flag to specify if we are interested in a body for count - no aggregations, no pagination bounds etc. All additional keyword arguments will be included into the dictionary.
elasticsearch_dsl/search.py
to_dict
cfpb/elasticsearch-dsl-py
python
def to_dict(self, count=False, **kwargs): "\n Serialize the search into the dictionary that will be sent over as the\n request's body.\n\n :arg count: a flag to specify if we are interested in a body for count -\n no aggregations, no pagination bounds etc.\n\n All additional keyword arguments will be included into the dictionary.\n " d = {} if self.query: d['query'] = self.query.to_dict() if (not count): if self.post_filter: d['post_filter'] = self.post_filter.to_dict() if self.aggs.aggs: d.update(self.aggs.to_dict()) if self._sort: d['sort'] = self._sort d.update(self._extra) if (self._source not in (None, {})): d['_source'] = self._source if self._highlight: d['highlight'] = {'fields': self._highlight} d['highlight'].update(self._highlight_opts) if self._suggest: d['suggest'] = self._suggest if self._script_fields: d['script_fields'] = self._script_fields d.update(kwargs) return d
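`to_dict` is also the easiest way to inspect how the pieces assemble without touching a cluster. A small composition (field names hypothetical; key order inside the body may differ):

from elasticsearch_dsl import Search

s = Search().query('match', title='python').filter('term', published=True).sort('-date')
print(s.to_dict())
# roughly: {'query': {'bool': {'must': [{'match': {'title': 'python'}}],
#                              'filter': [{'term': {'published': True}}]}},
#           'sort': [{'date': {'order': 'desc'}}]}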
def count(self): '\n Return the number of hits matching the query and filters. Note that\n only the actual number is returned.\n ' if (hasattr(self, '_response') and (self._response.hits.total.relation == 'eq')): return self._response.hits.total.value es = get_connection(self._using) d = self.to_dict(count=True) return es.count(index=self._index, body=d, **self._params)['count']
4,067,295,734,994,645,500
Return the number of hits matching the query and filters. Note that only the actual number is returned.
elasticsearch_dsl/search.py
count
cfpb/elasticsearch-dsl-py
python
def count(self): '\n Return the number of hits matching the query and filters. Note that\n only the actual number is returned.\n ' if (hasattr(self, '_response') and (self._response.hits.total.relation == 'eq')): return self._response.hits.total.value es = get_connection(self._using) d = self.to_dict(count=True) return es.count(index=self._index, body=d, **self._params)['count']
def execute(self, ignore_cache=False): '\n    Execute the search and return an instance of ``Response`` wrapping all\n    the data.\n\n    :arg ignore_cache: if set to ``True``, consecutive calls will hit\n        ES and any cached result will be ignored. Defaults to `False`\n    ' if (ignore_cache or (not hasattr(self, '_response'))): es = get_connection(self._using) self._response = self._response_class(self, es.search(index=self._index, body=self.to_dict(), **self._params)) return self._response
-769,132,555,925,094,400
Execute the search and return an instance of ``Response`` wrapping all the data. :arg ignore_cache: if set to ``True``, consecutive calls will hit ES and any cached result will be ignored. Defaults to `False`
elasticsearch_dsl/search.py
execute
cfpb/elasticsearch-dsl-py
python
def execute(self, ignore_cache=False): '\n    Execute the search and return an instance of ``Response`` wrapping all\n    the data.\n\n    :arg ignore_cache: if set to ``True``, consecutive calls will hit\n        ES and any cached result will be ignored. Defaults to `False`\n    ' if (ignore_cache or (not hasattr(self, '_response'))): es = get_connection(self._using) self._response = self._response_class(self, es.search(index=self._index, body=self.to_dict(), **self._params)) return self._response
def scan(self): '\n    Turn the search into a scan search and return a generator that will\n    iterate over all the documents matching the query.\n\n    Use ``params`` method to specify any additional arguments you wish to\n    pass to the underlying ``scan`` helper from ``elasticsearch-py`` -\n    https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan\n\n    ' es = get_connection(self._using) for hit in scan(es, query=self.to_dict(), index=self._index, **self._params): (yield self._get_result(hit))
8,946,906,841,084,514,000
Turn the search into a scan search and return a generator that will iterate over all the documents matching the query. Use ``params`` method to specify any additional arguments you wish to pass to the underlying ``scan`` helper from ``elasticsearch-py`` - https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
elasticsearch_dsl/search.py
scan
cfpb/elasticsearch-dsl-py
python
def scan(self): '\n    Turn the search into a scan search and return a generator that will\n    iterate over all the documents matching the query.\n\n    Use ``params`` method to specify any additional arguments you wish to\n    pass to the underlying ``scan`` helper from ``elasticsearch-py`` -\n    https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan\n\n    ' es = get_connection(self._using) for hit in scan(es, query=self.to_dict(), index=self._index, **self._params): (yield self._get_result(hit))
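A hedged sketch of `scan` in use; the connection host, index name, and field are assumptions, and the `scroll` keyword is forwarded to the underlying `scan` helper via `params`:

from elasticsearch_dsl import Search, connections

connections.create_connection(hosts=['http://localhost:9200'])  # hypothetical cluster
s = Search(index='logs').query('match', message='error')        # hypothetical index and field
for hit in s.params(scroll='5m').scan():
    print(hit.meta.id)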
def delete(self): '\n delete() executes the query by delegating to delete_by_query()\n ' es = get_connection(self._using) return AttrDict(es.delete_by_query(index=self._index, body=self.to_dict(), **self._params))
1,368,681,962,290,731,800
delete() executes the query by delegating to delete_by_query()
elasticsearch_dsl/search.py
delete
cfpb/elasticsearch-dsl-py
python
def delete(self): '\n \n ' es = get_connection(self._using) return AttrDict(es.delete_by_query(index=self._index, body=self.to_dict(), **self._params))
def add(self, search): "\n Adds a new :class:`~elasticsearch_dsl.Search` object to the request::\n\n ms = MultiSearch(index='my-index')\n ms = ms.add(Search(doc_type=Category).filter('term', category='python'))\n ms = ms.add(Search(doc_type=Blog))\n " ms = self._clone() ms._searches.append(search) return ms
6,799,146,122,245,675,000
Adds a new :class:`~elasticsearch_dsl.Search` object to the request:: ms = MultiSearch(index='my-index') ms = ms.add(Search(doc_type=Category).filter('term', category='python')) ms = ms.add(Search(doc_type=Blog))
elasticsearch_dsl/search.py
add
cfpb/elasticsearch-dsl-py
python
def add(self, search): "\n Adds a new :class:`~elasticsearch_dsl.Search` object to the request::\n\n ms = MultiSearch(index='my-index')\n ms = ms.add(Search(doc_type=Category).filter('term', category='python'))\n ms = ms.add(Search(doc_type=Blog))\n " ms = self._clone() ms._searches.append(search) return ms
def execute(self, ignore_cache=False, raise_on_error=True): '\n Execute the multi search request and return a list of search results.\n ' if (ignore_cache or (not hasattr(self, '_response'))): es = get_connection(self._using) responses = es.msearch(index=self._index, body=self.to_dict(), **self._params) out = [] for (s, r) in zip(self._searches, responses['responses']): if r.get('error', False): if raise_on_error: raise TransportError('N/A', r['error']['type'], r['error']) r = None else: r = Response(s, r) out.append(r) self._response = out return self._response
-7,734,126,237,782,678,000
Execute the multi search request and return a list of search results.
elasticsearch_dsl/search.py
execute
cfpb/elasticsearch-dsl-py
python
def execute(self, ignore_cache=False, raise_on_error=True): '\n \n ' if (ignore_cache or (not hasattr(self, '_response'))): es = get_connection(self._using) responses = es.msearch(index=self._index, body=self.to_dict(), **self._params) out = [] for (s, r) in zip(self._searches, responses['responses']): if r.get('error', False): if raise_on_error: raise TransportError('N/A', r['error']['type'], r['error']) r = None else: r = Response(s, r) out.append(r) self._response = out return self._response
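`MultiSearch` batches several `Search` bodies into one `msearch` request, and `execute` returns one entry per added search, in order. A sketch assuming a configured default connection and a hypothetical index:

from elasticsearch_dsl import MultiSearch, Search

ms = MultiSearch(index='blogs')  # hypothetical index
ms = ms.add(Search().filter('term', category='python'))
ms = ms.add(Search().filter('term', category='search'))
for response in ms.execute():    # one Response (or None on error) per search
    print(response.hits.total)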
@pytest.fixture def good_predict_at(): 'A `predict_at` within `START`-`END` and ...\n\n ... a long enough history so that either `SHORT_TRAIN_HORIZON`\n or `LONG_TRAIN_HORIZON` works.\n ' return datetime.datetime(test_config.END.year, test_config.END.month, test_config.END.day, test_config.NOON, 0)
845,732,126,145,894,300
A `predict_at` within `START`-`END` and ... ... a long enough history so that either `SHORT_TRAIN_HORIZON` or `LONG_TRAIN_HORIZON` works.
tests/forecasts/timify/test_make_time_series.py
good_predict_at
webartifex/urban-meal-delivery
python
@pytest.fixture def good_predict_at(): 'A `predict_at` within `START`-`END` and ...\n\n ... a long enough history so that either `SHORT_TRAIN_HORIZON`\n or `LONG_TRAIN_HORIZON` works.\n ' return datetime.datetime(test_config.END.year, test_config.END.month, test_config.END.day, test_config.NOON, 0)
@pytest.fixture def bad_predict_at(): 'A `predict_at` within `START`-`END` but ...\n\n    ... not a long enough history, so that neither `SHORT_TRAIN_HORIZON`\n    nor `LONG_TRAIN_HORIZON` works.\n    ' predict_day = (test_config.END - datetime.timedelta(weeks=6, days=1)) return datetime.datetime(predict_day.year, predict_day.month, predict_day.day, test_config.NOON, 0)
9,116,327,730,989,172,000
A `predict_at` within `START`-`END` but ... ... not a long enough history, so that neither `SHORT_TRAIN_HORIZON` nor `LONG_TRAIN_HORIZON` works.
tests/forecasts/timify/test_make_time_series.py
bad_predict_at
webartifex/urban-meal-delivery
python
@pytest.fixture def bad_predict_at(): 'A `predict_at` within `START`-`END` but ...\n\n    ... not a long enough history, so that neither `SHORT_TRAIN_HORIZON`\n    nor `LONG_TRAIN_HORIZON` works.\n    ' predict_day = (test_config.END - datetime.timedelta(weeks=6, days=1)) return datetime.datetime(predict_day.year, predict_day.month, predict_day.day, test_config.NOON, 0)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_wrong_pixel(self, order_history, good_predict_at, train_horizon): 'A `pixel_id` that is not in the `grid`.' with pytest.raises(LookupError): order_history.make_horizontal_ts(pixel_id=999999, predict_at=good_predict_at, train_horizon=train_horizon)
4,505,417,187,260,774,000
A `pixel_id` that is not in the `grid`.
tests/forecasts/timify/test_make_time_series.py
test_wrong_pixel
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_wrong_pixel(self, order_history, good_predict_at, train_horizon): with pytest.raises(LookupError): order_history.make_horizontal_ts(pixel_id=999999, predict_at=good_predict_at, train_horizon=train_horizon)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_are_series(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The time series come as a `pd.Series`.' result = order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert isinstance(training_ts, pd.Series) assert (training_ts.name == 'n_orders') assert isinstance(actuals_ts, pd.Series) assert (actuals_ts.name == 'n_orders')
-460,741,185,384,608,830
The time series come as a `pd.Series`.
tests/forecasts/timify/test_make_time_series.py
test_time_series_are_series
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_are_series(self, order_history, good_pixel_id, good_predict_at, train_horizon): result = order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert isinstance(training_ts, pd.Series) assert (training_ts.name == 'n_orders') assert isinstance(actuals_ts, pd.Series) assert (actuals_ts.name == 'n_orders')
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series must be a multiple of `7` ...\n\n ... whereas the time series with the actual order counts has only `1` value.\n ' result = order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert (len(training_ts) == (7 * train_horizon)) assert (len(actuals_ts) == 1)
3,063,075,932,768,489,000
The length of a training time series must be a multiple of `7` ... ... whereas the time series with the actual order counts has only `1` value.
tests/forecasts/timify/test_make_time_series.py
test_time_series_have_correct_length
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series must be a multiple of `7` ...\n\n ... whereas the time series with the actual order counts has only `1` value.\n ' result = order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert (len(training_ts) == (7 * train_horizon)) assert (len(actuals_ts) == 1)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_frequency_is_number_of_weekdays(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The `frequency` must be `7`.' result = order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (_, frequency, _) = result assert (frequency == 7)
7,620,836,816,034,598,000
The `frequency` must be `7`.
tests/forecasts/timify/test_make_time_series.py
test_frequency_is_number_of_weekdays
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_frequency_is_number_of_weekdays(self, order_history, good_pixel_id, good_predict_at, train_horizon): result = order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (_, frequency, _) = result assert (frequency == 7)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_no_long_enough_history1(self, order_history, good_pixel_id, bad_predict_at, train_horizon): 'If the `predict_at` day is too early in the `START`-`END` horizon ...\n\n ... the history of order totals is not long enough.\n ' with pytest.raises(RuntimeError): order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=bad_predict_at, train_horizon=train_horizon)
-133,165,761,900,441,440
If the `predict_at` day is too early in the `START`-`END` horizon ... ... the history of order totals is not long enough.
tests/forecasts/timify/test_make_time_series.py
test_no_long_enough_history1
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_no_long_enough_history1(self, order_history, good_pixel_id, bad_predict_at, train_horizon): 'If the `predict_at` day is too early in the `START`-`END` horizon ...\n\n ... the history of order totals is not long enough.\n ' with pytest.raises(RuntimeError): order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=bad_predict_at, train_horizon=train_horizon)
def test_no_long_enough_history2(self, order_history, good_pixel_id, good_predict_at): 'If the `train_horizon` is longer than the `START`-`END` horizon ...\n\n ... the history of order totals can never be long enough.\n ' with pytest.raises(RuntimeError): order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=999)
985,862,545,747,328,000
If the `train_horizon` is longer than the `START`-`END` horizon ... ... the history of order totals can never be long enough.
tests/forecasts/timify/test_make_time_series.py
test_no_long_enough_history2
webartifex/urban-meal-delivery
python
def test_no_long_enough_history2(self, order_history, good_pixel_id, good_predict_at): 'If the `train_horizon` is longer than the `START`-`END` horizon ...\n\n ... the history of order totals can never be long enough.\n ' with pytest.raises(RuntimeError): order_history.make_horizontal_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=999)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_wrong_pixel(self, order_history, good_predict_at, train_horizon): 'A `pixel_id` that is not in the `grid`.' with pytest.raises(LookupError): order_history.make_vertical_ts(pixel_id=999999, predict_day=good_predict_at.date(), train_horizon=train_horizon)
1,357,936,782,452,300,000
A `pixel_id` that is not in the `grid`.
tests/forecasts/timify/test_make_time_series.py
test_wrong_pixel
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_wrong_pixel(self, order_history, good_predict_at, train_horizon): with pytest.raises(LookupError): order_history.make_vertical_ts(pixel_id=999999, predict_day=good_predict_at.date(), train_horizon=train_horizon)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_are_series(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The time series come as `pd.Series`.' result = order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert isinstance(training_ts, pd.Series) assert (training_ts.name == 'n_orders') assert isinstance(actuals_ts, pd.Series) assert (actuals_ts.name == 'n_orders')
-6,274,435,031,828,883,000
The time series come as `pd.Series`.
tests/forecasts/timify/test_make_time_series.py
test_time_series_are_series
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_are_series(self, order_history, good_pixel_id, good_predict_at, train_horizon): result = order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert isinstance(training_ts, pd.Series) assert (training_ts.name == 'n_orders') assert isinstance(actuals_ts, pd.Series) assert (actuals_ts.name == 'n_orders')
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series is the product of the ...\n\n ... weekly time steps (i.e., product of `7` and the number of daily time steps)\n and the `train_horizon` in weeks.\n\n The time series with the actual order counts always holds one observation\n per time step of a day.\n ' result = order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=train_horizon) (training_ts, _, actuals_ts) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (len(training_ts) == ((7 * n_daily_time_steps) * train_horizon)) assert (len(actuals_ts) == n_daily_time_steps)
-1,977,358,239,434,876,000
The length of a training time series is the product of the ... ... weekly time steps (i.e., product of `7` and the number of daily time steps) and the `train_horizon` in weeks. The time series with the actual order counts always holds one observation per time step of a day.
tests/forecasts/timify/test_make_time_series.py
test_time_series_have_correct_length
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series is the product of the ...\n\n ... weekly time steps (i.e., product of `7` and the number of daily time steps)\n and the `train_horizon` in weeks.\n\n The time series with the actual order counts always holds one observation\n per time step of a day.\n ' result = order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=train_horizon) (training_ts, _, actuals_ts) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (len(training_ts) == ((7 * n_daily_time_steps) * train_horizon)) assert (len(actuals_ts) == n_daily_time_steps)
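A worked example makes the length arithmetic above concrete. The configuration values below (service hours and time-step length) are illustrative assumptions, not values taken from the test config:

    >>> service_start, service_end = 11, 23    # assumed service hours
    >>> long_time_step = 60                    # assumed minutes per time step
    >>> n_daily_time_steps = 60 * (service_end - service_start) // long_time_step
    >>> n_daily_time_steps
    12
    >>> train_horizon = 2                      # weeks
    >>> 7 * n_daily_time_steps * train_horizon # length of the training series
    168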
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_frequency_is_number_number_of_weekly_time_steps(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The `frequency` is the number of weekly time steps.' result = order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=train_horizon) (_, frequency, _) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (frequency == (7 * n_daily_time_steps))
4,505,564,102,296,897,500
The `frequency` is the number of weekly time steps.
tests/forecasts/timify/test_make_time_series.py
test_frequency_is_number_number_of_weekly_time_steps
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_frequency_is_number_number_of_weekly_time_steps(self, order_history, good_pixel_id, good_predict_at, train_horizon): result = order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=train_horizon) (_, frequency, _) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (frequency == (7 * n_daily_time_steps))
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_no_long_enough_history1(self, order_history, good_pixel_id, bad_predict_at, train_horizon): 'If the `predict_at` day is too early in the `START`-`END` horizon ...\n\n ... the history of order totals is not long enough.\n ' with pytest.raises(RuntimeError): order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=bad_predict_at.date(), train_horizon=train_horizon)
-168,511,720,616,015,650
If the `predict_at` day is too early in the `START`-`END` horizon ... ... the history of order totals is not long enough.
tests/forecasts/timify/test_make_time_series.py
test_no_long_enough_history1
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_no_long_enough_history1(self, order_history, good_pixel_id, bad_predict_at, train_horizon): 'If the `predict_at` day is too early in the `START`-`END` horizon ...\n\n ... the history of order totals is not long enough.\n ' with pytest.raises(RuntimeError): order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=bad_predict_at.date(), train_horizon=train_horizon)
def test_no_long_enough_history2(self, order_history, good_pixel_id, good_predict_at): 'If the `train_horizon` is longer than the `START`-`END` horizon ...\n\n ... the history of order totals can never be long enough.\n ' with pytest.raises(RuntimeError): order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=999)
-159,212,202,509,557,200
If the `train_horizon` is longer than the `START`-`END` horizon ... ... the history of order totals can never be long enough.
tests/forecasts/timify/test_make_time_series.py
test_no_long_enough_history2
webartifex/urban-meal-delivery
python
def test_no_long_enough_history2(self, order_history, good_pixel_id, good_predict_at): 'If the `train_horizon` is longer than the `START`-`END` horizon ...\n\n ... the history of order totals can never be long enough.\n ' with pytest.raises(RuntimeError): order_history.make_vertical_ts(pixel_id=good_pixel_id, predict_day=good_predict_at.date(), train_horizon=999)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_wrong_pixel(self, order_history, good_predict_at, train_horizon): 'A `pixel_id` that is not in the `grid`.' with pytest.raises(LookupError): order_history.make_realtime_ts(pixel_id=999999, predict_at=good_predict_at, train_horizon=train_horizon)
8,099,444,227,379,590,000
A `pixel_id` that is not in the `grid`.
tests/forecasts/timify/test_make_time_series.py
test_wrong_pixel
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_wrong_pixel(self, order_history, good_predict_at, train_horizon): with pytest.raises(LookupError): order_history.make_realtime_ts(pixel_id=999999, predict_at=good_predict_at, train_horizon=train_horizon)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_are_series(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The time series come as `pd.Series`.' result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert isinstance(training_ts, pd.Series) assert (training_ts.name == 'n_orders') assert isinstance(actuals_ts, pd.Series) assert (actuals_ts.name == 'n_orders')
-5,622,506,090,569,481,000
The time series come as `pd.Series`.
tests/forecasts/timify/test_make_time_series.py
test_time_series_are_series
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_are_series(self, order_history, good_pixel_id, good_predict_at, train_horizon): result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result assert isinstance(training_ts, pd.Series) assert (training_ts.name == 'n_orders') assert isinstance(actuals_ts, pd.Series) assert (actuals_ts.name == 'n_orders')
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length1(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series is the product of the ...\n\n ... weekly time steps (i.e., product of `7` and the number of daily time steps)\n and the `train_horizon` in weeks; however, this assertion only holds if\n we predict the first `time_step` of the day.\n\n The time series with the actual order counts always holds `1` value.\n ' predict_at = datetime.datetime(good_predict_at.year, good_predict_at.month, good_predict_at.day, config.SERVICE_START, 0) result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (len(training_ts) == ((7 * n_daily_time_steps) * train_horizon)) assert (len(actuals_ts) == 1)
7,418,191,010,827,245,000
The length of a training time series is the product of the ... ... weekly time steps (i.e., product of `7` and the number of daily time steps) and the `train_horizon` in weeks; however, this assertion only holds if we predict the first `time_step` of the day. The time series with the actual order counts always holds `1` value.
tests/forecasts/timify/test_make_time_series.py
test_time_series_have_correct_length1
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length1(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series is the product of the ...\n\n ... weekly time steps (i.e., product of `7` and the number of daily time steps)\n and the `train_horizon` in weeks; however, this assertion only holds if\n we predict the first `time_step` of the day.\n\n The time series with the actual order counts always holds `1` value.\n ' predict_at = datetime.datetime(good_predict_at.year, good_predict_at.month, good_predict_at.day, config.SERVICE_START, 0) result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (len(training_ts) == ((7 * n_daily_time_steps) * train_horizon)) assert (len(actuals_ts) == 1)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length2(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series is the product of the ...\n\n ... weekly time steps (i.e., product of `7` and the number of daily time steps)\n and the `train_horizon` in weeks; however, this assertion only holds if\n we predict the first `time_step` of the day. Predicting any other `time_step`\n means that the training time series becomes longer by the number of time steps\n before the one being predicted.\n\n The time series with the actual order counts always holds `1` value.\n ' assert (good_predict_at.hour == test_config.NOON) result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) n_time_steps_before = ((60 * (test_config.NOON - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (len(training_ts) == (((7 * n_daily_time_steps) * train_horizon) + n_time_steps_before)) assert (len(actuals_ts) == 1)
-5,741,354,582,931,707,000
The length of a training time series is the product of the ... ... weekly time steps (i.e., product of `7` and the number of daily time steps) and the `train_horizon` in weeks; however, this assertion only holds if we predict the first `time_step` of the day. Predicting any other `time_step` means that the training time series becomes longer by the number of time steps before the one being predicted. The time series with the actual order counts always holds `1` value.
tests/forecasts/timify/test_make_time_series.py
test_time_series_have_correct_length2
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_time_series_have_correct_length2(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The length of a training time series is the product of the ...\n\n ... weekly time steps (i.e., product of `7` and the number of daily time steps)\n and the `train_horizon` in weeks; however, this assertion only holds if\n we predict the first `time_step` of the day. Predicting any other `time_step`\n means that the training time series becomes longer by the number of time steps\n before the one being predicted.\n\n The time series with the actual order counts always holds `1` value.\n ' assert (good_predict_at.hour == test_config.NOON) result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (training_ts, _, actuals_ts) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) n_time_steps_before = ((60 * (test_config.NOON - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (len(training_ts) == (((7 * n_daily_time_steps) * train_horizon) + n_time_steps_before)) assert (len(actuals_ts) == 1)
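The offset logic in this test can be traced with the same illustrative assumptions as before (the NOON, SERVICE_START, and LONG_TIME_STEP values are hypothetical):

    >>> noon, service_start = 12, 11           # assumed prediction hour and opening hour
    >>> long_time_step = 60                    # assumed minutes per time step
    >>> n_time_steps_before = 60 * (noon - service_start) // long_time_step
    >>> n_time_steps_before
    1
    >>> 7 * 12 * 2 + n_time_steps_before       # 2-week horizon with 12 daily time steps
    169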
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_frequency_is_number_number_of_weekly_time_steps(self, order_history, good_pixel_id, good_predict_at, train_horizon): 'The `frequency` is the number of weekly time steps.' result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (_, frequency, _) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (frequency == (7 * n_daily_time_steps))
7,710,180,793,476,809,000
The `frequency` is the number of weekly time steps.
tests/forecasts/timify/test_make_time_series.py
test_frequency_is_number_number_of_weekly_time_steps
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_frequency_is_number_number_of_weekly_time_steps(self, order_history, good_pixel_id, good_predict_at, train_horizon): result = order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=train_horizon) (_, frequency, _) = result n_daily_time_steps = ((60 * (config.SERVICE_END - config.SERVICE_START)) // test_config.LONG_TIME_STEP) assert (frequency == (7 * n_daily_time_steps))
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_no_long_enough_history1(self, order_history, good_pixel_id, bad_predict_at, train_horizon): 'If the `predict_at` day is too early in the `START`-`END` horizon ...\n\n ... the history of order totals is not long enough.\n ' with pytest.raises(RuntimeError): order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=bad_predict_at, train_horizon=train_horizon)
-9,063,957,476,974,908,000
If the `predict_at` day is too early in the `START`-`END` horizon ... ... the history of order totals is not long enough.
tests/forecasts/timify/test_make_time_series.py
test_no_long_enough_history1
webartifex/urban-meal-delivery
python
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS) def test_no_long_enough_history1(self, order_history, good_pixel_id, bad_predict_at, train_horizon): 'If the `predict_at` day is too early in the `START`-`END` horizon ...\n\n ... the history of order totals is not long enough.\n ' with pytest.raises(RuntimeError): order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=bad_predict_at, train_horizon=train_horizon)
def test_no_long_enough_history2(self, order_history, good_pixel_id, good_predict_at): 'If the `train_horizon` is longer than the `START`-`END` horizon ...\n\n ... the history of order totals can never be long enough.\n ' with pytest.raises(RuntimeError): order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=999)
-6,738,553,165,639,628,000
If the `train_horizon` is longer than the `START`-`END` horizon ... ... the history of order totals can never be long enough.
tests/forecasts/timify/test_make_time_series.py
test_no_long_enough_history2
webartifex/urban-meal-delivery
python
def test_no_long_enough_history2(self, order_history, good_pixel_id, good_predict_at): 'If the `train_horizon` is longer than the `START`-`END` horizon ...\n\n ... the history of order totals can never be long enough.\n ' with pytest.raises(RuntimeError): order_history.make_realtime_ts(pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=999)
def __init__(self, master=None, session=None): '\n Constructor of the class\n ' super().__init__(master) self.master = master self.grid(row=0, column=0) self.session = session self.mealname = tk.StringVar() self.create_widgets()
-2,930,560,312,995,248,600
Constructor of the class
gui/addmealpopup.py
__init__
Penaz91/fjournal
python
def __init__(self, master=None, session=None): '\n \n ' super().__init__(master) self.master = master self.grid(row=0, column=0) self.session = session self.mealname = tk.StringVar() self.create_widgets()
def create_widgets(self): '\n Creates the widgets for the popup\n ' self.meallbl = ttk.Label(self, text='Meal Name') self.meallbl.grid(row=0, column=0) self.mealinput = ttk.Entry(self, textvariable=self.mealname) self.mealinput.grid(row=0, column=1) self.addbtn = ttk.Button(self, text='Confirm', command=self.add_meal) self.addbtn.grid(row=1, column=0, columnspan=2)
5,257,788,785,566,407,000
Creates the widgets for the popup
gui/addmealpopup.py
create_widgets
Penaz91/fjournal
python
def create_widgets(self): '\n \n ' self.meallbl = ttk.Label(self, text='Meal Name') self.meallbl.grid(row=0, column=0) self.mealinput = ttk.Entry(self, textvariable=self.mealname) self.mealinput.grid(row=0, column=1) self.addbtn = ttk.Button(self, text='Confirm', command=self.add_meal) self.addbtn.grid(row=1, column=0, columnspan=2)
def add_meal(self): '\n        Adds the meal to the database and closes the popup\n        ' meal = Meal(name=self.mealname.get()) self.session.add(meal) self.session.commit() self.master.destroy()
-7,492,770,554,143,263,000
Adds the meal to the database and closes the popup
gui/addmealpopup.py
add_meal
Penaz91/fjournal
python
def add_meal(self): '\n \n ' meal = Meal(name=self.mealname.get()) self.session.add(meal) self.session.commit() self.master.destroy()
def create_or_update_storage_password(self, props, logger): '\n unencrypted password in inputs.conf, encrypt it and store as storagePassword\n ' try: locale = 'reference' storage_passwords = self.service.storage_passwords if (props['username'] in storage_passwords): locale = 'delete' storage_passwords.delete(props['username']) except Exception as e: logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'.format(e, locale)) try: locale = 'create' self.service.storage_passwords.create(props['password'], props['username']) except Exception as e: logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'.format(e, locale))
5,640,986,405,998,167,000
unencrypted password in inputs.conf, encrypt it and store as storagePassword
bin/azure_monitor_metrics_main.py
create_or_update_storage_password
sebastus/AzureMonitorAddonForSplunk
python
def create_or_update_storage_password(self, props, logger): '\n \n ' try: locale = 'reference' storage_passwords = self.service.storage_passwords if (props['username'] in storage_passwords): locale = 'delete' storage_passwords.delete(props['username']) except Exception as e: logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'.format(e, locale)) try: locale = 'create' self.service.storage_passwords.create(props['password'], props['username']) except Exception as e: logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'.format(e, locale))
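The delete-then-create logic above is a reusable pattern. A minimal sketch, assuming `service` is an authenticated `splunklib.client.Service` and using only the `storage_passwords` calls that appear in the function (the helper name is hypothetical):

    def upsert_storage_password(service, username, password):
        # Remove any stale entry first, then store the new credential.
        passwords = service.storage_passwords
        if username in passwords:
            passwords.delete(username)
        passwords.create(password, username)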
def mask_id_and_key(self, name, logger): '\n masks the app_id and app_key in inputs.conf\n ' (kind, input_name) = name.split('://') item = self.service.inputs.__getitem__((input_name, kind)) try: new_input = {'vaultName': item.content.vaultName, 'SPNTenantID': item.content.SPNTenantID, 'SPNApplicationId': MASK, 'SPNApplicationKey': MASK, 'SubscriptionId': item.content.SubscriptionId, 'secretName': item.content.secretName, 'secretVersion': item.content.secretVersion, 'index': item.content.index, 'interval': item.content.interval, 'sourcetype': item.content.sourcetype} item.update(**new_input).refresh() except Exception as e: logger('ERROR', 'Error caught in mask_id_and_key: {0}'.format(e))
5,995,213,074,449,839,000
masks the app_id and app_key in inputs.conf
bin/azure_monitor_metrics_main.py
mask_id_and_key
sebastus/AzureMonitorAddonForSplunk
python
def mask_id_and_key(self, name, logger): '\n \n ' (kind, input_name) = name.split('://') item = self.service.inputs.__getitem__((input_name, kind)) try: new_input = {'vaultName': item.content.vaultName, 'SPNTenantID': item.content.SPNTenantID, 'SPNApplicationId': MASK, 'SPNApplicationKey': MASK, 'SubscriptionId': item.content.SubscriptionId, 'secretName': item.content.secretName, 'secretVersion': item.content.secretVersion, 'index': item.content.index, 'interval': item.content.interval, 'sourcetype': item.content.sourcetype} item.update(**new_input).refresh() except Exception as e: logger('ERROR', 'Error caught in mask_id_and_key: {0}'.format(e))
def get_or_store_secrets(self, inputs, logger): '\n        Either read existing encrypted password or encrypt clear text password and store it\n        Either way, return a set of clear text credentials\n        ' input_items = inputs.inputs.itervalues().next() input_name = inputs.inputs.iterkeys().next() credentials = {} storage_passwords = self.service.storage_passwords props_app_id = {} props_app_id['username'] = 'AzureMonitorMetricsAppID-{0}'.format(input_name.replace(':', '_')) props_app_id['password'] = input_items.get('SPNApplicationId') props_app_key = {} props_app_key['username'] = 'AzureMonitorMetricsAppKey-{0}'.format(input_name.replace(':', '_')) props_app_key['password'] = input_items.get('SPNApplicationKey') app_id = input_items.get('SPNApplicationId') app_key = input_items.get('SPNApplicationKey') if ((app_id is not None) and (app_key is not None)): try: if (('AzureMonitorMetricsAppID' in storage_passwords) and (props_app_id['username'] not in storage_passwords)): modify_storage_password(self, 'AzureMonitorMetricsAppID', props_app_id['username'], logger) if (('AzureMonitorMetricsAppKey' in storage_passwords) and (props_app_key['username'] not in storage_passwords)): modify_storage_password(self, 'AzureMonitorMetricsAppKey', props_app_key['username'], logger) if (props_app_id['password'] == MASK): (app_id, app_key) = get_app_id_and_key(self, props_app_id, props_app_key, logger) else: create_or_update_storage_password(self, props_app_id, logger) create_or_update_storage_password(self, props_app_key, logger) mask_id_and_key(self, input_name, logger) except Exception as e: logger('ERROR', 'Error caught in get_or_store_secrets: {0}'.format(e)) credentials['app_id'] = app_id credentials['app_key'] = app_key return credentials
977,659,677,259,260,400
Either read existing encrypted password or encrypt clear text password and store it Either way, return a set of clear text credentials
bin/azure_monitor_metrics_main.py
get_or_store_secrets
sebastus/AzureMonitorAddonForSplunk
python
def get_or_store_secrets(self, inputs, logger): '\n        Either read existing encrypted password or encrypt clear text password and store it\n        Either way, return a set of clear text credentials\n        ' input_items = inputs.inputs.itervalues().next() input_name = inputs.inputs.iterkeys().next() credentials = {} storage_passwords = self.service.storage_passwords props_app_id = {} props_app_id['username'] = 'AzureMonitorMetricsAppID-{0}'.format(input_name.replace(':', '_')) props_app_id['password'] = input_items.get('SPNApplicationId') props_app_key = {} props_app_key['username'] = 'AzureMonitorMetricsAppKey-{0}'.format(input_name.replace(':', '_')) props_app_key['password'] = input_items.get('SPNApplicationKey') app_id = input_items.get('SPNApplicationId') app_key = input_items.get('SPNApplicationKey') if ((app_id is not None) and (app_key is not None)): try: if (('AzureMonitorMetricsAppID' in storage_passwords) and (props_app_id['username'] not in storage_passwords)): modify_storage_password(self, 'AzureMonitorMetricsAppID', props_app_id['username'], logger) if (('AzureMonitorMetricsAppKey' in storage_passwords) and (props_app_key['username'] not in storage_passwords)): modify_storage_password(self, 'AzureMonitorMetricsAppKey', props_app_key['username'], logger) if (props_app_id['password'] == MASK): (app_id, app_key) = get_app_id_and_key(self, props_app_id, props_app_key, logger) else: create_or_update_storage_password(self, props_app_id, logger) create_or_update_storage_password(self, props_app_key, logger) mask_id_and_key(self, input_name, logger) except Exception as e: logger('ERROR', 'Error caught in get_or_store_secrets: {0}'.format(e)) credentials['app_id'] = app_id credentials['app_key'] = app_key return credentials
def get_app_id_and_key(self, props_app_id, props_app_key, logger): '\n    get the encrypted app_id and app_key from storage_passwords\n    ' storage_passwords = self.service.storage_passwords if (props_app_id['username'] not in storage_passwords): raise KeyError('Did not find app_id {} in storage_passwords.'.format(props_app_id['username'])) if (props_app_key['username'] not in storage_passwords): raise KeyError('Did not find app_key {} in storage_passwords.'.format(props_app_key['username'])) app_id = '' app_key = '' try: app_id = storage_passwords[props_app_id['username']].clear_password app_key = storage_passwords[props_app_key['username']].clear_password except Exception as e: logger('ERROR', 'Error caught in get_app_id_and_key: {0}'.format(e)) return (app_id, app_key)
3,914,703,477,364,691,500
get the encrypted app_id and app_key from storage_passwords
bin/azure_monitor_metrics_main.py
get_app_id_and_key
sebastus/AzureMonitorAddonForSplunk
python
def get_app_id_and_key(self, props_app_id, props_app_key, logger): '\n    \n    ' storage_passwords = self.service.storage_passwords if (props_app_id['username'] not in storage_passwords): raise KeyError('Did not find app_id {} in storage_passwords.'.format(props_app_id['username'])) if (props_app_key['username'] not in storage_passwords): raise KeyError('Did not find app_key {} in storage_passwords.'.format(props_app_key['username'])) app_id = '' app_key = '' try: app_id = storage_passwords[props_app_id['username']].clear_password app_key = storage_passwords[props_app_key['username']].clear_password except Exception as e: logger('ERROR', 'Error caught in get_app_id_and_key: {0}'.format(e)) return (app_id, app_key)
def get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict): '\n map the resource groups to a function that gets resources\n ' resource_group_names = [] for resource_group in resource_groups: resource_group_names.append(resource_group['name']) with futures.ThreadPoolExecutor(max_workers=5) as executor: rg_future = dict(((executor.submit(get_resources, ew, bearer_token, sub_url, rg), rg) for rg in resource_group_names)) for future in futures.as_completed(rg_future, None): resource_group = rg_future[future] if (future.exception() is not None): ew.log('ERROR', 'Resource group {0} generated an exception: {1}'.format(resource_group, future.exception())) else: get_metrics_for_resources(ew, bearer_token, sub_url, resource_group, future.result(), input_sourcetype, checkpoint_dict)
-5,780,757,164,133,059,000
map the resource groups to a function that gets resources
bin/azure_monitor_metrics_main.py
get_resources_for_rgs
sebastus/AzureMonitorAddonForSplunk
python
def get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict): '\n \n ' resource_group_names = [] for resource_group in resource_groups: resource_group_names.append(resource_group['name']) with futures.ThreadPoolExecutor(max_workers=5) as executor: rg_future = dict(((executor.submit(get_resources, ew, bearer_token, sub_url, rg), rg) for rg in resource_group_names)) for future in futures.as_completed(rg_future, None): resource_group = rg_future[future] if (future.exception() is not None): ew.log('ERROR', 'Resource group {0} generated an exception: {1}'.format(resource_group, future.exception())) else: get_metrics_for_resources(ew, bearer_token, sub_url, resource_group, future.result(), input_sourcetype, checkpoint_dict)
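The fan-out/fan-in structure above is the standard `concurrent.futures` pattern. A self-contained sketch with a hypothetical `fetch` callable standing in for `get_resources`:

    from concurrent import futures

    def fan_out(fetch, names, max_workers=5):
        # Submit one task per name, then collect results as they complete.
        results = {}
        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_name = {executor.submit(fetch, name): name for name in names}
            for future in futures.as_completed(future_to_name):
                name = future_to_name[future]
                if future.exception() is not None:
                    print('{0} generated an exception: {1}'.format(name, future.exception()))
                else:
                    results[name] = future.result()
        return results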
def get_metrics_for_subscription(inputs, credentials, ew): '\n top level function\n given subscription id and credentials, get metrics for all resources with the right tags\n splunk sends an array of inputs, but only one element, hence the [0]\n ' metadata = inputs.metadata (input_name, input_item) = inputs.inputs.popitem() stanza = input_name.split('://') instance_name = stanza[1] try: locale = 'checkpoint file data' checkpoint_dir = metadata['checkpoint_dir'] checkpoint_dict = {'checkpoint_dir': checkpoint_dir, 'instance_name': instance_name} locale = 'put_time_window' put_time_window(ew, checkpoint_dict) locale = 'put_time_checkpoint' put_time_checkpoint(ew, checkpoint_dict) tenant_id = input_item.get('SPNTenantID') spn_client_id = credentials.get('app_id') spn_client_secret = credentials.get('app_key') subscription_id = input_item.get('SubscriptionId') key_vault_name = input_item.get('vaultName') secret_name = input_item.get('secretName') secret_version = input_item.get('secretVersion') input_sourcetype = input_item.get('sourcetype') arm_creds = {} if ((spn_client_id is not None) and (spn_client_secret is not None)): locale = 'get_access_token for key vault SPN' authentication_endpoint = 'https://login.windows.net/' resource = 'https://vault.azure.net' kv_bearer_token = get_access_token(tenant_id, spn_client_id, spn_client_secret, authentication_endpoint, resource) locale = 'get_secret_from_keyvault' arm_creds = get_secret_from_keyvault(ew, kv_bearer_token, key_vault_name, secret_name, secret_version) locale = 'get_access_token' authentication_endpoint = get_azure_environment('Azure')['activeDirectoryEndpointUrl'] resource = get_azure_environment('Azure')['activeDirectoryResourceId'] bearer_token = get_access_token(tenant_id, arm_creds.get('spn_client_id'), arm_creds.get('spn_client_secret'), authentication_endpoint, resource) locale = 'get_azure_environment' resource_mgr_endpoint_url = get_azure_environment('Azure')['resourceManagerEndpointUrl'] locale = 'get_subscription_segment' sub_url = (resource_mgr_endpoint_url + get_subscription_segment(subscription_id)) locale = 'get_resources' resource_groups = get_resources(ew, bearer_token, sub_url) locale = 'get_resources_for_rgs' get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict) except: ew.log('ERROR', 'Error caught in get_metrics_for_subscription, type: {0}, value: {1}, locale = {2}'.format(sys.exc_info()[0], sys.exc_info()[1], locale))
-3,657,075,620,885,416,400
top level function given subscription id and credentials, get metrics for all resources with the right tags splunk sends an array of inputs, but only one element, hence the [0]
bin/azure_monitor_metrics_main.py
get_metrics_for_subscription
sebastus/AzureMonitorAddonForSplunk
python
def get_metrics_for_subscription(inputs, credentials, ew): '\n top level function\n given subscription id and credentials, get metrics for all resources with the right tags\n splunk sends an array of inputs, but only one element, hence the [0]\n ' metadata = inputs.metadata (input_name, input_item) = inputs.inputs.popitem() stanza = input_name.split('://') instance_name = stanza[1] try: locale = 'checkpoint file data' checkpoint_dir = metadata['checkpoint_dir'] checkpoint_dict = {'checkpoint_dir': checkpoint_dir, 'instance_name': instance_name} locale = 'put_time_window' put_time_window(ew, checkpoint_dict) locale = 'put_time_checkpoint' put_time_checkpoint(ew, checkpoint_dict) tenant_id = input_item.get('SPNTenantID') spn_client_id = credentials.get('app_id') spn_client_secret = credentials.get('app_key') subscription_id = input_item.get('SubscriptionId') key_vault_name = input_item.get('vaultName') secret_name = input_item.get('secretName') secret_version = input_item.get('secretVersion') input_sourcetype = input_item.get('sourcetype') arm_creds = {} if ((spn_client_id is not None) and (spn_client_secret is not None)): locale = 'get_access_token for key vault SPN' authentication_endpoint = 'https://login.windows.net/' resource = 'https://vault.azure.net' kv_bearer_token = get_access_token(tenant_id, spn_client_id, spn_client_secret, authentication_endpoint, resource) locale = 'get_secret_from_keyvault' arm_creds = get_secret_from_keyvault(ew, kv_bearer_token, key_vault_name, secret_name, secret_version) locale = 'get_access_token' authentication_endpoint = get_azure_environment('Azure')['activeDirectoryEndpointUrl'] resource = get_azure_environment('Azure')['activeDirectoryResourceId'] bearer_token = get_access_token(tenant_id, arm_creds.get('spn_client_id'), arm_creds.get('spn_client_secret'), authentication_endpoint, resource) locale = 'get_azure_environment' resource_mgr_endpoint_url = get_azure_environment('Azure')['resourceManagerEndpointUrl'] locale = 'get_subscription_segment' sub_url = (resource_mgr_endpoint_url + get_subscription_segment(subscription_id)) locale = 'get_resources' resource_groups = get_resources(ew, bearer_token, sub_url) locale = 'get_resources_for_rgs' get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict) except: ew.log('ERROR', 'Error caught in get_metrics_for_subscription, type: {0}, value: {1}, locale = {2}'.format(sys.exc_info()[0], sys.exc_info()[1], locale))
def get_edge_bin(array): 'Detect the edge indices of a binary 1-D array.\n\n    Args:\n        array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary\n            (0/1) or boolean (True/False) values.\n\n    Returns:\n        list: A list containing starting and ending indices of the non-zero\n            blocks.\n\n    Examples:\n\n        .. code-block:: python\n\n            >>> a = [0,1,1,0,0,0,1,0,1]\n            >>> get_edge_bin(a)\n            [(1, 3), (6, 7), (8, 9)]\n            >>> b = [True, False, True, True, False, False]\n            >>> get_edge_bin(b)\n            [(0, 1), (2, 4)]\n    ' array1 = np.int64(array) array1 = np.insert(array1, 0, 0) array1 = np.append(array1, 0) tmp = (array1 - np.roll(array1, 1)) i1_lst = (np.nonzero((tmp == 1))[0] - 1) i2_lst = (np.nonzero((tmp == (- 1)))[0] - 1) return list(zip(i1_lst, i2_lst))
8,507,008,367,016,243,000
Detect the edge indices of a binary 1-D array. Args: array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary (0/1) or boolean (True/False) values. Returns: list: A list containing starting and ending indices of the non-zero blocks. Examples: .. code-block:: python >>> a = [0,1,1,0,0,0,1,0,1] >>> get_edge_bin(a) [(1, 3), (6, 7), (8, 9)] >>> b = [True, False, True, True, False, False] >>> get_edge_bin(b) [(0, 1), (2, 4)]
gamse/utils/onedarray.py
get_edge_bin
wangleon/gamse
python
def get_edge_bin(array): 'Detect the edge indices of a binary 1-D array.\n\n    Args:\n        array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary\n            (0/1) or boolean (True/False) values.\n\n    Returns:\n        list: A list containing starting and ending indices of the non-zero\n            blocks.\n\n    Examples:\n\n        .. code-block:: python\n\n            >>> a = [0,1,1,0,0,0,1,0,1]\n            >>> get_edge_bin(a)\n            [(1, 3), (6, 7), (8, 9)]\n            >>> b = [True, False, True, True, False, False]\n            >>> get_edge_bin(b)\n            [(0, 1), (2, 4)]\n    ' array1 = np.int64(array) array1 = np.insert(array1, 0, 0) array1 = np.append(array1, 0) tmp = (array1 - np.roll(array1, 1)) i1_lst = (np.nonzero((tmp == 1))[0] - 1) i2_lst = (np.nonzero((tmp == (- 1)))[0] - 1) return list(zip(i1_lst, i2_lst))
def get_local_minima(x, window=None): 'Get the local minima of a 1d array in a window.\n\n    Args:\n        x (:class:`numpy.ndarray`): A list or Numpy 1d array.\n        window (*int* or :class:`numpy.ndarray`): An odd integer or a list of\n            odd integers as the lengths of the searching window.\n    Returns:\n        tuple: A tuple containing:\n\n            * **index** (:class:`numpy.ndarray`): A numpy 1d array containing \n              indices of all local minima.\n            * **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing\n              values of all local minima.\n\n    ' x = np.array(x) dif = np.diff(x) ind = (dif > 0) tmp = np.logical_xor(ind, np.roll(ind, 1)) idx = np.logical_and(tmp, ind) index = np.where(idx)[0] if (window is None): return (index, x[index]) else: if isinstance(window, int): window = np.repeat(window, len(x)) elif isinstance(window, np.ndarray): if (window.dtype.type in [np.int16, np.int32, np.int64]): pass else: print('window array are not integers') raise ValueError else: raise ValueError if (0 in (window % 2)): raise ValueError halfwin_lst = ((window - 1) // 2) index_lst = [] for i in index: halfwin = halfwin_lst[i] i1 = max(0, (i - halfwin)) i2 = min(((i + halfwin) + 1), len(x)) if (i == (x[i1:i2].argmin() + i1)): index_lst.append(i) if (len(index_lst) > 0): index_lst = np.array(index_lst) return (index_lst, x[index_lst]) else: return (np.array([]), np.array([]))
-3,854,103,118,873,968,000
Get the local minima of a 1d array in a window. Args: x (:class:`numpy.ndarray`): A list or Numpy 1d array. window (*int* or :class:`numpy.ndarray`): An odd integer or a list of odd integers as the lengths of the searching window. Returns: tuple: A tuple containing: * **index** (:class:`numpy.ndarray`): A numpy 1d array containing indices of all local minima. * **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing values of all local minima.
gamse/utils/onedarray.py
get_local_minima
wangleon/gamse
python
def get_local_minima(x, window=None): 'Get the local minima of a 1d array in a window.\n\n    Args:\n        x (:class:`numpy.ndarray`): A list or Numpy 1d array.\n        window (*int* or :class:`numpy.ndarray`): An odd integer or a list of\n            odd integers as the lengths of the searching window.\n    Returns:\n        tuple: A tuple containing:\n\n            * **index** (:class:`numpy.ndarray`): A numpy 1d array containing \n              indices of all local minima.\n            * **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing\n              values of all local minima.\n\n    ' x = np.array(x) dif = np.diff(x) ind = (dif > 0) tmp = np.logical_xor(ind, np.roll(ind, 1)) idx = np.logical_and(tmp, ind) index = np.where(idx)[0] if (window is None): return (index, x[index]) else: if isinstance(window, int): window = np.repeat(window, len(x)) elif isinstance(window, np.ndarray): if (window.dtype.type in [np.int16, np.int32, np.int64]): pass else: print('window array are not integers') raise ValueError else: raise ValueError if (0 in (window % 2)): raise ValueError halfwin_lst = ((window - 1) // 2) index_lst = [] for i in index: halfwin = halfwin_lst[i] i1 = max(0, (i - halfwin)) i2 = min(((i + halfwin) + 1), len(x)) if (i == (x[i1:i2].argmin() + i1)): index_lst.append(i) if (len(index_lst) > 0): index_lst = np.array(index_lst) return (index_lst, x[index_lst]) else: return (np.array([]), np.array([]))
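A minimal usage sketch for `get_local_minima`; the output follows from the definition above, since x[1] = 2 and x[3] = 1 are the only local minima (array printing may vary slightly across NumPy versions):

    >>> get_local_minima([3, 2, 4, 1, 5])
    (array([1, 3]), array([2, 1]))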
def implete_none(lst): 'Replace the None elements at the beginning and the end of a list by auto\n    increment integers.\n    \n    Convert the first and last few `None` elements to auto increment integers.\n    These integers are determined by the first and last integers in the input\n    array.\n    While the `None` elements between two integers in the input list will\n    remain.\n\n    Args:\n        lst (list): A list containing None values.\n    Returns:\n        newlst (list): A list containing auto increment integers.\n\t\n    Examples:\n        .. code-block:: python\n\n            >>> a = [None,None,3,4,None,5,6,None,None]\n            >>> implete_none(a)\n            [1, 2, 3, 4, None, 5, 6, 7, 8]\n\n    ' notnone_lst = [v for v in lst if (v is not None)] for (i, v) in enumerate(lst): if (v == notnone_lst[0]): notnone1 = i value1 = v if (v == notnone_lst[(- 1)]): notnone2 = i value2 = v newlst = [] for (i, v) in enumerate(lst): if (i < notnone1): newlst.append((value1 - (notnone1 - i))) elif (i > notnone2): newlst.append((value2 + (i - notnone2))) else: newlst.append(v) return newlst
-2,926,428,472,532,981,000
Replace the None elements at the beginning and the end of a list by auto increment integers. Convert the first and last few `None` elements to auto increment integers. These integers are determined by the first and last integers in the input array. While the `None` elements between two integers in the input list will remain. Args: lst (list): A list containing None values. Returns: newlst (list): A list containing auto increment integers. Examples: .. code-block:: python >>> a = [None,None,3,4,None,5,6,None,None] >>> implete_none(a) [1, 2, 3, 4, None, 5, 6, 7, 8]
gamse/utils/onedarray.py
implete_none
wangleon/gamse
python
def implete_none(lst): 'Replace the None elements at the beginning and the end of a list by auto\n    increment integers.\n    \n    Convert the first and last few `None` elements to auto increment integers.\n    These integers are determined by the first and last integers in the input\n    array.\n    While the `None` elements between two integers in the input list will\n    remain.\n\n    Args:\n        lst (list): A list containing None values.\n    Returns:\n        newlst (list): A list containing auto increment integers.\n\t\n    Examples:\n        .. code-block:: python\n\n            >>> a = [None,None,3,4,None,5,6,None,None]\n            >>> implete_none(a)\n            [1, 2, 3, 4, None, 5, 6, 7, 8]\n\n    ' notnone_lst = [v for v in lst if (v is not None)] for (i, v) in enumerate(lst): if (v == notnone_lst[0]): notnone1 = i value1 = v if (v == notnone_lst[(- 1)]): notnone2 = i value2 = v newlst = [] for (i, v) in enumerate(lst): if (i < notnone1): newlst.append((value1 - (notnone1 - i))) elif (i > notnone2): newlst.append((value2 + (i - notnone2))) else: newlst.append(v) return newlst
def derivative(*args, **kwargs): 'Get the first derivative of data arrays (*x*, *y*).\n\n If **y** is not given, the first argument will be taken as **y**, and the\n differential of the input array will be returned.\n\n Args:\n x (list or :class:`numpy.ndarray`): X-values of the input array (optional).\n y (list or :class:`numpy.ndarray`): Y-values of the input array.\n points (int): Number of points used to calculate derivative\n (optional, default is 3).\n\n Returns:\n :class:`numpy.ndarray`: Derivative of the input array.\n ' if (len(args) == 1): y = np.array(args[0], dtype=np.float64) x = np.arange(y.size) elif (len(args) == 2): x = np.array(args[0], dtype=np.float64) y = np.array(args[1], dtype=np.float64) else: raise ValueError npts = x.size points = kwargs.pop('points', 3) if (points == 3): der = ((np.roll(y, (- 1)) - np.roll(y, 1)) / (np.roll(x, (- 1)) - np.roll(x, 1))) a = np.array([(- 3.0), 4.0, (- 1.0)]) der[0] = ((a * y[0:3]).sum() / (a * x[0:3]).sum()) der[(- 1)] = (((- a[::(- 1)]) * y[(- 3):]).sum() / ((- a[::(- 1)]) * x[(- 3):]).sum()) return der else: raise ValueError
-1,023,508,425,012,736,900
Get the first derivative of data arrays (*x*, *y*). If **y** is not given, the first argument will be taken as **y**, and the differential of the input array will be returned. Args: x (list or :class:`numpy.ndarray`): X-values of the input array (optional). y (list or :class:`numpy.ndarray`): Y-values of the input array. points (int): Number of points used to calculate derivative (optional, default is 3). Returns: :class:`numpy.ndarray`: Derivative of the input array.
gamse/utils/onedarray.py
derivative
wangleon/gamse
python
def derivative(*args, **kwargs): 'Get the first derivative of data arrays (*x*, *y*).\n\n If **y** is not given, the first argument will be taken as **y**, and the\n differential of the input array will be returned.\n\n Args:\n x (list or :class:`numpy.ndarray`): X-values of the input array (optional).\n y (list or :class:`numpy.ndarray`): Y-values of the input array.\n points (int): Number of points used to calculate derivative\n (optional, default is 3).\n\n Returns:\n :class:`numpy.ndarray`: Derivative of the input array.\n ' if (len(args) == 1): y = np.array(args[0], dtype=np.float64) x = np.arange(y.size) elif (len(args) == 2): x = np.array(args[0], dtype=np.float64) y = np.array(args[1], dtype=np.float64) else: raise ValueError npts = x.size points = kwargs.pop('points', 3) if (points == 3): der = ((np.roll(y, (- 1)) - np.roll(y, 1)) / (np.roll(x, (- 1)) - np.roll(x, 1))) a = np.array([(- 3.0), 4.0, (- 1.0)]) der[0] = ((a * y[0:3]).sum() / (a * x[0:3]).sum()) der[(- 1)] = (((- a[::(- 1)]) * y[(- 3):]).sum() / ((- a[::(- 1)]) * x[(- 3):]).sum()) return der else: raise ValueError
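A usage sketch for `derivative`: sampling y = x**2 at integer points, the three-point formulas recover dy/dx = 2x exactly at every point, including the one-sided endpoints (array printing may vary slightly across NumPy versions):

    >>> x = [0, 1, 2, 3, 4]
    >>> y = [0, 1, 4, 9, 16]
    >>> derivative(x, y)
    array([0., 2., 4., 6., 8.])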
def pairwise(array): 'Return pairwise elements of an iterable array.\n\n    Args:\n        array (:class:`numpy.ndarray`): The input iterable array.\n    Returns:\n        :class:`zip`: zip objects.\n    ' (a, b) = tee(array) next(b, None) return zip(a, b)
7,734,158,313,875,277,000
Return pairwise elements of an iterable array. Args: array (:class:`numpy.ndarray`): The input iterable array. Returns: :class:`zip`: zip objects.
gamse/utils/onedarray.py
pairwise
wangleon/gamse
python
def pairwise(array): 'Return pairwise elements of an iterable array.\n\n    Args:\n        array (:class:`numpy.ndarray`): The input iterable array.\n    Returns:\n        :class:`zip`: zip objects.\n    ' (a, b) = tee(array) next(b, None) return zip(a, b)
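A usage sketch for `pairwise`, which yields each element together with its successor:

    >>> list(pairwise([1, 2, 3, 4]))
    [(1, 2), (2, 3), (3, 4)]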
def smooth(array, points, deg): 'Smooth an array.\n\n Args:\n array (:class:`numpy.ndarray`): Input array.\n points (int): Points of smoothing.\n deg (int): Degree of smoothing.\n\n Returns:\n :class:`numpy.ndarray`: smoothed array\n\n ' n = array.size if (points == 5): if (deg == 2): w_2 = (np.array([31.0, 9.0, (- 3.0), (- 5.0), 3.0]) / 35.0) w_1 = (np.array([9.0, 13.0, 12.0, 6.0, (- 5.0)]) / 35.0) w_0 = (np.array([(- 3.0), 12.0, 17.0, 12.0, (- 3.0)]) / 35.0) elif (deg == 3): w_2 = (np.array([69.0, 4.0, (- 6.0), 4.0, (- 1.0)]) / 70.0) w_1 = (np.array([2.0, 27.0, 12.0, (- 8.0), 2.0]) / 35.0) w_0 = (np.array([(- 3.0), 12.0, 17.0, 12.0, (- 3.0)]) / 35.0) a = np.zeros((n, n)) a[0, 0:5] = w_2 a[1, 0:5] = w_1 for i in np.arange(2, (n - 2)): a[i, (i - 2):(i + 3)] = w_0 a[(- 2), (- 5):] = w_1[::(- 1)] a[(- 1), (- 5):] = w_2[::(- 1)] result = (np.matrix(a) * np.matrix(array.reshape((- 1), 1))) return np.array(result)[:, 0]
627,949,520,329,858,600
Smooth an array. Args: array (:class:`numpy.ndarray`): Input array. points (int): Points of smoothing. deg (int): Degree of smoothing. Returns: :class:`numpy.ndarray`: smoothed array
gamse/utils/onedarray.py
smooth
wangleon/gamse
python
def smooth(array, points, deg): 'Smooth an array.\n\n Args:\n array (:class:`numpy.ndarray`): Input array.\n points (int): Points of smoothing.\n deg (int): Degree of smoothing.\n\n Returns:\n :class:`numpy.ndarray`: smoothed array\n\n ' n = array.size if (points == 5): if (deg == 2): w_2 = (np.array([31.0, 9.0, (- 3.0), (- 5.0), 3.0]) / 35.0) w_1 = (np.array([9.0, 13.0, 12.0, 6.0, (- 5.0)]) / 35.0) w_0 = (np.array([(- 3.0), 12.0, 17.0, 12.0, (- 3.0)]) / 35.0) elif (deg == 3): w_2 = (np.array([69.0, 4.0, (- 6.0), 4.0, (- 1.0)]) / 70.0) w_1 = (np.array([2.0, 27.0, 12.0, (- 8.0), 2.0]) / 35.0) w_0 = (np.array([(- 3.0), 12.0, 17.0, 12.0, (- 3.0)]) / 35.0) a = np.zeros((n, n)) a[0, 0:5] = w_2 a[1, 0:5] = w_1 for i in np.arange(2, (n - 2)): a[i, (i - 2):(i + 3)] = w_0 a[(- 2), (- 5):] = w_1[::(- 1)] a[(- 1), (- 5):] = w_2[::(- 1)] result = (np.matrix(a) * np.matrix(array.reshape((- 1), 1))) return np.array(result)[:, 0]
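A quick sanity check for `smooth`: a degree-2 Savitzky-Golay filter reproduces any straight line (up to floating-point rounding), so smoothing a linear ramp should return it essentially unchanged:

    >>> import numpy as np
    >>> np.allclose(smooth(np.arange(6.0), points=5, deg=2), np.arange(6.0))
    True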
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10, upper_clip=None, lower_clip=None): 'Smooth the input array with Savitzky-Golay filter with lower and/or\n    upper clippings.\n\n    Args:\n        y (:class:`numpy.ndarray`): Input array.\n        winlen (int): Window length of Savitzky-Golay filter.\n        order (int): Order of Savitzky-Golay filter.\n        maxiter (int): Maximum number of iterations.\n        lower_clip (float): Lower sigma-clipping value.\n        upper_clip (float): Upper sigma-clipping value.\n\n    Returns:\n        tuple: A tuple containing:\n\n            * **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.\n            * **yres** (:class:`numpy.ndarray`) – Residuals of y values.\n            * **mask** (:class:`numpy.ndarray`) – Mask of y values.\n            * **std** (float) – Standard deviation.\n    ' x = np.arange(y.size) mask = np.ones_like(y, dtype=bool) for ite in range(maxiter): f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3) ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order) yres = (y - ysmooth) std = yres[mask].std() new_mask = (mask * np.ones_like(mask, dtype=bool)) if (lower_clip is not None): new_mask *= (yres > ((- lower_clip) * std)) if (upper_clip is not None): new_mask *= (yres < (upper_clip * std)) if (new_mask.sum() == mask.sum()): break mask = new_mask return (ysmooth, yres, mask, std)
8,435,448,282,620,777,000
Smooth the input array with Savitzky-Golay filter with lower and/or upper clippings. Args: y (:class:`numpy.ndarray`): Input array. winlen (int): Window length of Savitzky-Golay filter. order (int): Order of Savitzky-Golay filter. maxiter (int): Maximum number of iterations. lower_clip (float): Lower sigma-clipping value. upper_clip (float): Upper sigma-clipping value. Returns: tuple: A tuple containing: * **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values. * **yres** (:class:`numpy.ndarray`) – Residuals of y values. * **mask** (:class:`numpy.ndarray`) – Mask of y values. * **std** (float) – Standard deviation.
gamse/utils/onedarray.py
iterative_savgol_filter
wangleon/gamse
python
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10, upper_clip=None, lower_clip=None): 'Smooth the input array with Savitzky-Golay filter with lower and/or\n    upper clippings.\n\n    Args:\n        y (:class:`numpy.ndarray`): Input array.\n        winlen (int): Window length of Savitzky-Golay filter.\n        order (int): Order of Savitzky-Golay filter.\n        maxiter (int): Maximum number of iterations.\n        lower_clip (float): Lower sigma-clipping value.\n        upper_clip (float): Upper sigma-clipping value.\n\n    Returns:\n        tuple: A tuple containing:\n\n            * **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.\n            * **yres** (:class:`numpy.ndarray`) – Residuals of y values.\n            * **mask** (:class:`numpy.ndarray`) – Mask of y values.\n            * **std** (float) – Standard deviation.\n    ' x = np.arange(y.size) mask = np.ones_like(y, dtype=bool) for ite in range(maxiter): f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3) ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order) yres = (y - ysmooth) std = yres[mask].std() new_mask = (mask * np.ones_like(mask, dtype=bool)) if (lower_clip is not None): new_mask *= (yres > ((- lower_clip) * std)) if (upper_clip is not None): new_mask *= (yres < (upper_clip * std)) if (new_mask.sum() == mask.sum()): break mask = new_mask return (ysmooth, yres, mask, std)
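A hedged usage sketch for `iterative_savgol_filter`: smooth a noisy sine and let the upper clipping flag an injected spike. All parameter values below are illustrative, and the function is assumed to be importable from the module above:

    import numpy as np

    np.random.seed(0)                   # make the sketch reproducible
    x = np.linspace(0, 4 * np.pi, 200)
    y = np.sin(x) + np.random.normal(0, 0.05, x.size)
    y[50] += 5.0                        # inject a single outlier
    ysmooth, yres, mask, std = iterative_savgol_filter(
        y, winlen=11, order=3, maxiter=10, upper_clip=3.0)
    # mask[50] comes back False: the spike is excluded from the final fit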
def process_file(filepath): '\n Rewrite links in `filepath` as follows: /some/path/index.html --> /some/path/\n ' if filepath.endswith('.html'): with open(filepath, 'r') as htmlfile: page = bs4.BeautifulSoup(htmlfile.read(), 'html.parser') links = page.find_all('a') for link in links: href = link['href'] if href.endswith('index.html'): href = href.replace('index.html', '') link['href'] = href video = page.find('video') if video: source = video.find('source') main_file = source['src'] tracks = video.find_all('track') if tracks: for track in tracks: new_src = main_file.replace('.mp4', '.vtt') track['src'] = new_src with open(filepath, 'w') as htmlfile: html = page.prettify() htmlfile.write(html)
-6,710,581,133,354,374,000
Rewrite links in `filepath` as follows: /some/path/index.html --> /some/path/
scripts/deindexify.py
process_file
learningequality/channel2site
python
def process_file(filepath): '\n \n ' if filepath.endswith('.html'): with open(filepath, 'r') as htmlfile: page = bs4.BeautifulSoup(htmlfile.read(), 'html.parser') links = page.find_all('a') for link in links: href = link['href'] if href.endswith('index.html'): href = href.replace('index.html', '') link['href'] = href video = page.find('video') if video: source = video.find('source') main_file = source['src'] tracks = video.find_all('track') if tracks: for track in tracks: new_src = main_file.replace('.mp4', '.vtt') track['src'] = new_src with open(filepath, 'w') as htmlfile: html = page.prettify() htmlfile.write(html)
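A quick check of process_file on a throwaway page; the temporary path is illustrative, and bs4 must be importable just as in the module.

import bs4  # the module imports bs4 at top level

with open('/tmp/sample.html', 'w') as f:
    f.write('<html><body><a href="lesson1/index.html">Lesson 1</a></body></html>')

process_file('/tmp/sample.html')

with open('/tmp/sample.html') as f:
    print(f.read())  # the anchor now reads href="lesson1/" (and the markup is prettified)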
def deindexify(webroot): '\n Walks the directory structure starting at `webroot` and rewrites all folder links.\n ' content_folders = list(os.walk(webroot)) for (rel_path, _subfolders, filenames) in content_folders: for filename in filenames: filepath = os.path.join(rel_path, filename) if filepath.endswith('_Subtitle.vtt'): video_matching_filepath = filepath.replace('_Subtitle.vtt', '_Low_Resolution.vtt') os.rename(filepath, video_matching_filepath) else: process_file(filepath)
-8,528,974,312,587,828,000
Walks the directory structure starting at `webroot` and rewrites all folder links.
scripts/deindexify.py
deindexify
learningequality/channel2site
python
def deindexify(webroot): '\n \n ' content_folders = list(os.walk(webroot)) for (rel_path, _subfolders, filenames) in content_folders: for filename in filenames: filepath = os.path.join(rel_path, filename) if filepath.endswith('_Subtitle.vtt'): video_matching_filepath = filepath.replace('_Subtitle.vtt', '_Low_Resolution.vtt') os.rename(filepath, video_matching_filepath) else: process_file(filepath)
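deindexify is the entry point that ties the two functions together; a hedged invocation sketch (the webroot path is made up):

# Normalize every exported HTML page under the site root, and rename
# *_Subtitle.vtt files so they pair with their *_Low_Resolution videos.
deindexify('/var/www/exported-site')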
def parse(self, stream, media_type=None, parser_context=None): '\n Given a stream to read from, return the parsed representation.\n Should return parsed data, or a `DataAndFiles` object consisting of the\n parsed data and files.\n ' raise NotImplementedError('.parse() must be overridden.')
-8,064,463,045,055,576,000
Given a stream to read from, return the parsed representation. Should return parsed data, or a `DataAndFiles` object consisting of the parsed data and files.
mparser.py
parse
marco-aziz/mPulse
python
def parse(self, stream, media_type=None, parser_context=None): '\n Given a stream to read from, return the parsed representation.\n Should return parsed data, or a `DataAndFiles` object consisting of the\n parsed data and files.\n ' raise NotImplementedError('.parse() must be overridden.')
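Because .parse() must be overridden, the smallest useful subclass just returns the raw body in decoded form. A sketch in the same DRF style; the PlainTextParser name and media type are illustrative, and BaseParser stands in for whichever base class mparser.py defines this method on (DRF's own rest_framework.parsers.BaseParser works the same way).

from rest_framework.parsers import BaseParser

class PlainTextParser(BaseParser):
    media_type = 'text/plain'

    def parse(self, stream, media_type=None, parser_context=None):
        # Return the request body as a decoded string.
        parser_context = parser_context or {}
        encoding = parser_context.get('encoding', 'utf-8')
        return stream.read().decode(encoding)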
def parse(self, stream, media_type=None, parser_context=None): "\n Treats the incoming bytestream as a raw file upload and returns\n a `DataAndFiles` object.\n\n `.data` will be None (we expect request body to be a file content).\n `.files` will be a `QueryDict` containing one 'file' element.\n " parser_context = (parser_context or {}) request = parser_context['request'] encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET) meta = request.META upload_handlers = request.upload_handlers filename = self.get_filename(stream, media_type, parser_context) content_type = meta.get('HTTP_CONTENT_TYPE', meta.get('CONTENT_TYPE', '')) try: content_length = int(meta.get('HTTP_CONTENT_LENGTH', meta.get('CONTENT_LENGTH', 0))) except (ValueError, TypeError): content_length = None for handler in upload_handlers: result = handler.handle_raw_input(stream, meta, content_length, None, encoding) if (result is not None): return DataAndFiles({}, {'file': result[1]}) possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size] chunk_size = min(([((2 ** 31) - 4)] + possible_sizes)) chunks = ChunkIter(stream, chunk_size) counters = ([0] * len(upload_handlers)) for (index, handler) in enumerate(upload_handlers): try: handler.new_file(None, filename, content_type, content_length, encoding) except StopFutureHandlers: upload_handlers = upload_handlers[:(index + 1)] break for chunk in chunks: for (index, handler) in enumerate(upload_handlers): '\n Trimming HttpResponse encapsulation from parsed file stream\n ' chunk_length = len(chunk) start = (chunk.find(bytes('\n\r\n', 'utf-8')) + 3) end = chunk.rfind(bytes('\r\n', 'utf-8')) end = (chunk[:end].rfind(bytes('\r\n', 'utf-8')) + 2) chunk = handler.receive_data_chunk(chunk[start:end], counters[index]) counters[index] += chunk_length if (chunk is None): break for (index, handler) in enumerate(upload_handlers): file_obj = handler.file_complete(counters[index]) if (file_obj is not None): return DataAndFiles({}, {'file': file_obj}) raise ParseError(self.errors['unhandled'])
-881,799,613,844,011,000
Treats the incoming bytestream as a raw file upload and returns a `DataAndFiles` object. `.data` will be None (we expect request body to be a file content). `.files` will be a `QueryDict` containing one 'file' element.
mparser.py
parse
marco-aziz/mPulse
python
def parse(self, stream, media_type=None, parser_context=None): "\n Treats the incoming bytestream as a raw file upload and returns\n a `DataAndFiles` object.\n\n `.data` will be None (we expect request body to be a file content).\n `.files` will be a `QueryDict` containing one 'file' element.\n " parser_context = (parser_context or {}) request = parser_context['request'] encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET) meta = request.META upload_handlers = request.upload_handlers filename = self.get_filename(stream, media_type, parser_context) content_type = meta.get('HTTP_CONTENT_TYPE', meta.get('CONTENT_TYPE', '')) try: content_length = int(meta.get('HTTP_CONTENT_LENGTH', meta.get('CONTENT_LENGTH', 0))) except (ValueError, TypeError): content_length = None for handler in upload_handlers: result = handler.handle_raw_input(stream, meta, content_length, None, encoding) if (result is not None): return DataAndFiles({}, {'file': result[1]}) possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size] chunk_size = min(([((2 ** 31) - 4)] + possible_sizes)) chunks = ChunkIter(stream, chunk_size) counters = ([0] * len(upload_handlers)) for (index, handler) in enumerate(upload_handlers): try: handler.new_file(None, filename, content_type, content_length, encoding) except StopFutureHandlers: upload_handlers = upload_handlers[:(index + 1)] break for chunk in chunks: for (index, handler) in enumerate(upload_handlers): '\n Trimming HttpResponse encapsulation from parsed file stream\n ' chunk_length = len(chunk) start = (chunk.find(bytes('\n\r\n', 'utf-8')) + 3) end = chunk.rfind(bytes('\r\n', 'utf-8')) end = (chunk[:end].rfind(bytes('\r\n', 'utf-8')) + 2) chunk = handler.receive_data_chunk(chunk[start:end], counters[index]) counters[index] += chunk_length if (chunk is None): break for (index, handler) in enumerate(upload_handlers): file_obj = handler.file_complete(counters[index]) if (file_obj is not None): return DataAndFiles({}, {'file': file_obj}) raise ParseError(self.errors['unhandled'])
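Assuming this parse() lives on a FileUploadParser-style class, wiring it into a Django REST framework view could look like the sketch below; MFileUploadParser and UploadView are hypothetical names, not taken from mparser.py.

from rest_framework.views import APIView
from rest_framework.response import Response
from mparser import MFileUploadParser  # hypothetical name for the class above

class UploadView(APIView):
    parser_classes = [MFileUploadParser]

    def put(self, request, filename=None):
        up_file = request.data['file']   # parse() returns files under the 'file' key
        with open('/tmp/' + up_file.name, 'wb') as dst:
            for chunk in up_file.chunks():
                dst.write(chunk)
        return Response(status=204)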
def get_filename(self, stream, media_type, parser_context): "\n Detects the uploaded file name. First searches a 'filename' url kwarg.\n Then tries to parse Content-Disposition header.\n " try: return parser_context['kwargs']['filename'] except KeyError: pass try: meta = parser_context['request'].META disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode()) filename_parm = disposition[1] if ('filename*' in filename_parm): return self.get_encoded_filename(filename_parm) return force_str(filename_parm['filename']) except (AttributeError, KeyError, ValueError): pass
7,388,234,637,951,297,000
Detects the uploaded file name. First searches a 'filename' url kwarg. Then tries to parse Content-Disposition header.
mparser.py
get_filename
marco-aziz/mPulse
python
def get_filename(self, stream, media_type, parser_context): "\n Detects the uploaded file name. First searches a 'filename' url kwarg.\n Then tries to parse Content-Disposition header.\n " try: return parser_context['kwargs']['filename'] except KeyError: pass try: meta = parser_context['request'].META disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode()) filename_parm = disposition[1] if ('filename*' in filename_parm): return self.get_encoded_filename(filename_parm) return force_str(filename_parm['filename']) except (AttributeError, KeyError, ValueError): pass
def get_encoded_filename(self, filename_parm): '\n Handle encoded filenames per RFC6266. See also:\n https://tools.ietf.org/html/rfc2231#section-4\n ' encoded_filename = force_str(filename_parm['filename*']) try: (charset, lang, filename) = encoded_filename.split("'", 2) filename = parse.unquote(filename) except (ValueError, LookupError): filename = force_str(filename_parm['filename']) return filename
3,080,074,238,029,017,600
Handle encoded filenames per RFC6266. See also: https://tools.ietf.org/html/rfc2231#section-4
mparser.py
get_encoded_filename
marco-aziz/mPulse
python
def get_encoded_filename(self, filename_parm): '\n Handle encoded filenames per RFC6266. See also:\n https://tools.ietf.org/html/rfc2231#section-4\n ' encoded_filename = force_str(filename_parm['filename*']) try: (charset, lang, filename) = encoded_filename.split("'", 2) filename = parse.unquote(filename) except (ValueError, LookupError): filename = force_str(filename_parm['filename']) return filename
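A worked example of the RFC 2231 split that get_encoded_filename performs, with an illustrative header value:

from urllib import parse

# Extended value format: charset'language'percent-encoded-bytes
encoded = "UTF-8''r%C3%A9sum%C3%A9.pdf"
charset, lang, filename = encoded.split("'", 2)
print(charset, lang, parse.unquote(filename))  # UTF-8 (empty lang) résumé.pdf

Note that the implementation percent-decodes with unquote's default UTF-8 regardless of the declared charset, and falls back to the plain filename parameter when the extended value is malformed.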
def Dispose(self): ' Dispose(self: Element,A_0: bool) ' pass
-1,686,048,740,131,138,300
Dispose(self: Element,A_0: bool)
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
Dispose
BCSharp/ironpython-stubs
python
def Dispose(self): ' ' pass
def getBoundingBox(self, *args): ' getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ ' pass
5,691,465,885,205,531,000
getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
getBoundingBox
BCSharp/ironpython-stubs
python
def getBoundingBox(self, *args): ' ' pass
def GetPoints(self): '\n GetPoints(self: CurveByPoints) -> ReferencePointArray\n\n \n\n Get the sequence of points interpolated by this curve.\n ' pass
-1,463,154,680,210,113,500
GetPoints(self: CurveByPoints) -> ReferencePointArray Get the sequence of points interpolated by this curve.
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
GetPoints
BCSharp/ironpython-stubs
python
def GetPoints(self): '\n GetPoints(self: CurveByPoints) -> ReferencePointArray\n\n \n\n Get the sequence of points interpolated by this curve.\n ' pass
def GetVisibility(self): '\n GetVisibility(self: CurveByPoints) -> FamilyElementVisibility\n\n \n\n Gets the visibility.\n\n Returns: A copy of visibility settings for the curve.\n ' pass
-7,174,960,410,123,180,000
GetVisibility(self: CurveByPoints) -> FamilyElementVisibility Gets the visibility. Returns: A copy of visibility settings for the curve.
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
GetVisibility
BCSharp/ironpython-stubs
python
def GetVisibility(self): '\n GetVisibility(self: CurveByPoints) -> FamilyElementVisibility\n\n \n\n Gets the visibility.\n\n Returns: A copy of visibility settings for the curve.\n ' pass
def ReleaseUnmanagedResources(self, *args): ' ReleaseUnmanagedResources(self: Element,disposing: bool) ' pass
-5,457,876,814,946,568,000
ReleaseUnmanagedResources(self: Element,disposing: bool)
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
ReleaseUnmanagedResources
BCSharp/ironpython-stubs
python
def ReleaseUnmanagedResources(self, *args): ' ' pass
def setElementType(self, *args): ' setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) ' pass
2,544,228,957,635,987,500
setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str)
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
setElementType
BCSharp/ironpython-stubs
python
def setElementType(self, *args): ' ' pass
def SetPoints(self, points): '\n SetPoints(self: CurveByPoints,points: ReferencePointArray)\n\n Change the sequence of points interpolated by this curve.\n\n \n\n points: An array of 2 or more ReferencePoints.\n ' pass
-4,495,061,781,904,432,000
SetPoints(self: CurveByPoints,points: ReferencePointArray) Change the sequence of points interpolated by this curve. points: An array of 2 or more ReferencePoints.
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
SetPoints
BCSharp/ironpython-stubs
python
def SetPoints(self, points): '\n SetPoints(self: CurveByPoints,points: ReferencePointArray)\n\n Change the sequence of points interpolated by this curve.\n\n \n\n points: An array of 2 or more ReferencePoints.\n ' pass
def SetVisibility(self, visibility): '\n SetVisibility(self: CurveByPoints,visibility: FamilyElementVisibility)\n\n Sets the visibility.\n ' pass
-2,096,814,541,229,781,000
SetVisibility(self: CurveByPoints,visibility: FamilyElementVisibility) Sets the visibility.
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
SetVisibility
BCSharp/ironpython-stubs
python
def SetVisibility(self, visibility): '\n SetVisibility(self: CurveByPoints,visibility: FamilyElementVisibility)\n\n Sets the visibility.\n ' pass
@staticmethod def SortPoints(arr): '\n SortPoints(arr: ReferencePointArray) -> bool\n\n \n\n Order a set of ReferencePoints in the same way Revit does\n\n when creating a \n\n curve from points.\n\n \n\n \n\n arr: An array of ReferencePoints. The array is reordered\n\n if sortPoints returns \n\n true, and is unchanged if\n\n sortPoints returns false.\n\n \n\n Returns: False if the least-squares method is unable to find a solution;\n\n true otherwise.\n ' pass
5,999,849,956,802,627,000
SortPoints(arr: ReferencePointArray) -> bool Order a set of ReferencePoints in the same way Revit does when creating a curve from points. arr: An array of ReferencePoints. The array is reordered if sortPoints returns true, and is unchanged if sortPoints returns false. Returns: False if the least-squares method is unable to find a solution; true otherwise.
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
SortPoints
BCSharp/ironpython-stubs
python
@staticmethod def SortPoints(arr): '\n SortPoints(arr: ReferencePointArray) -> bool\n\n \n\n Order a set of ReferencePoints in the same way Revit does\n\n when creating a \n\n curve from points.\n\n \n\n \n\n arr: An array of ReferencePoints. The array is reordered\n\n if sortPoints returns \n\n true, and is unchanged if\n\n sortPoints returns false.\n\n \n\n Returns: False if the least-squares method is unable to find a solution;\n\n true otherwise.\n ' pass
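Taken together, these stubs describe the point-manipulation surface of Autodesk.Revit.DB.CurveByPoints. A hedged RevitPythonShell-style sketch, assuming `curve` is an existing CurveByPoints element and a transaction is already open; only the signatures documented above are relied on, everything else is an assumption.

pts = curve.GetPoints()            # ReferencePointArray interpolated by the curve
if CurveByPoints.SortPoints(pts):  # reorder in place, as Revit would on curve creation
    curve.SetPoints(pts)           # requires an array of 2 or more ReferencePoints

vis = curve.GetVisibility()        # copy of the visibility settings
curve.SetVisibility(vis)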
def __enter__(self, *args): ' __enter__(self: IDisposable) -> object ' pass
-4,485,805,406,909,797,400
__enter__(self: IDisposable) -> object
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
__enter__
BCSharp/ironpython-stubs
python
def __enter__(self, *args): ' ' pass
def __exit__(self, *args): ' __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) ' pass
-8,148,954,987,636,554,000
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
__exit__
BCSharp/ironpython-stubs
python
def __exit__(self, *args): ' ' pass
def __init__(self, *args): ' x.__init__(...) initializes x; see x.__class__.__doc__ for signature ' pass
-90,002,593,062,007,400
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py
__init__
BCSharp/ironpython-stubs
python
def __init__(self, *args): ' ' pass
@staticmethod def _get_original_labels(val_path): 'Returns labels for validation.\n\n Args:\n val_path: path to TAR file containing validation images. It is used to\n retrieve the name of pictures and associate them to labels.\n\n Returns:\n dict, mapping from image name (str) to label (str).\n ' labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME)) with tf.io.gfile.GFile(labels_path) as labels_f: labels = labels_f.read().strip().splitlines() with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj: tar = tarfile.open(mode='r:', fileobj=tar_f_obj) images = sorted(tar.getnames()) return dict(zip(images, labels))
-8,557,001,365,274,106,000
Returns labels for validation. Args: val_path: path to TAR file containing validation images. It is used to retrieve the name of pictures and associate them to labels. Returns: dict, mapping from image name (str) to label (str).
tensorflow_datasets/image_classification/imagenet2012_real.py
_get_original_labels
Abduttayyeb/datasets
python
@staticmethod def _get_original_labels(val_path): 'Returns labels for validation.\n\n Args:\n val_path: path to TAR file containing validation images. It is used to\n retrieve the name of pictures and associate them to labels.\n\n Returns:\n dict, mapping from image name (str) to label (str).\n ' labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME)) with tf.io.gfile.GFile(labels_path) as labels_f: labels = labels_f.read().strip().splitlines() with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj: tar = tarfile.open(mode='r:', fileobj=tar_f_obj) images = sorted(tar.getnames()) return dict(zip(images, labels))
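The essence of _get_original_labels is zipping the tar's sorted member names with the label file, line by line. A self-contained illustration with an in-memory tar and made-up names and labels; the real code streams both files through tf.io.gfile from TFDS-managed paths.

import io
import tarfile

# Build a tiny tar holding two empty "validation images", deliberately out of order.
buf = io.BytesIO()
with tarfile.open(mode='w', fileobj=buf) as tar:
    for name in ('val_00000002.JPEG', 'val_00000001.JPEG'):
        tar.addfile(tarfile.TarInfo(name), io.BytesIO(b''))
buf.seek(0)

labels = ['n01751748', 'n09193705']  # one label per line, in sorted-name order
with tarfile.open(mode='r:', fileobj=buf) as tar:
    images = sorted(tar.getnames())
print(dict(zip(images, labels)))
# {'val_00000001.JPEG': 'n01751748', 'val_00000002.JPEG': 'n09193705'}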
def test_vifport(self): 'create and stringify vif port, confirm no exceptions' self.mox.ReplayAll() pname = 'vif1.0' ofport = 5 vif_id = uuidutils.generate_uuid() mac = 'ca:fe:de:ad:be:ef' port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br) self.assertEqual(port.port_name, pname) self.assertEqual(port.ofport, ofport) self.assertEqual(port.vif_id, vif_id) self.assertEqual(port.vif_mac, mac) self.assertEqual(port.switch.br_name, self.BR_NAME) foo = str(port) self.mox.VerifyAll()
-6,393,318,519,462,724,000
create and stringify vif port, confirm no exceptions
quantum/tests/unit/openvswitch/test_ovs_lib.py
test_vifport
ericwanghp/quantum
python
def test_vifport(self): self.mox.ReplayAll() pname = 'vif1.0' ofport = 5 vif_id = uuidutils.generate_uuid() mac = 'ca:fe:de:ad:be:ef' port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br) self.assertEqual(port.port_name, pname) self.assertEqual(port.ofport, ofport) self.assertEqual(port.vif_id, vif_id) self.assertEqual(port.vif_mac, mac) self.assertEqual(port.switch.br_name, self.BR_NAME) foo = str(port) self.mox.VerifyAll()