repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
cloudant/python-cloudant
src/cloudant/database.py
CouchDatabase.delete_query_index
def delete_query_index(self, design_document_id, index_type, index_name): """ Deletes the query index identified by the design document id, index type and index name from the remote database. :param str design_document_id: The design document id that the index exists in. :param str index_type: The type of the index to be deleted. Must be either 'text' or 'json'. :param str index_name: The index name of the index to be deleted. """ if index_type == JSON_INDEX_TYPE: index = Index(self, design_document_id, index_name) elif index_type == TEXT_INDEX_TYPE: index = TextIndex(self, design_document_id, index_name) else: raise CloudantArgumentError(103, index_type) index.delete()
python
def delete_query_index(self, design_document_id, index_type, index_name): """ Deletes the query index identified by the design document id, index type and index name from the remote database. :param str design_document_id: The design document id that the index exists in. :param str index_type: The type of the index to be deleted. Must be either 'text' or 'json'. :param str index_name: The index name of the index to be deleted. """ if index_type == JSON_INDEX_TYPE: index = Index(self, design_document_id, index_name) elif index_type == TEXT_INDEX_TYPE: index = TextIndex(self, design_document_id, index_name) else: raise CloudantArgumentError(103, index_type) index.delete()
[ "def", "delete_query_index", "(", "self", ",", "design_document_id", ",", "index_type", ",", "index_name", ")", ":", "if", "index_type", "==", "JSON_INDEX_TYPE", ":", "index", "=", "Index", "(", "self", ",", "design_document_id", ",", "index_name", ")", "elif", ...
Deletes the query index identified by the design document id, index type and index name from the remote database. :param str design_document_id: The design document id that the index exists in. :param str index_type: The type of the index to be deleted. Must be either 'text' or 'json'. :param str index_name: The index name of the index to be deleted.
[ "Deletes", "the", "query", "index", "identified", "by", "the", "design", "document", "id", "index", "type", "and", "index", "name", "from", "the", "remote", "database", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1151-L1168
train
206,400
cloudant/python-cloudant
src/cloudant/database.py
CouchDatabase.get_partitioned_query_result
def get_partitioned_query_result(self, partition_key, selector, fields=None, raw_result=False, **kwargs): """ Retrieves the partitioned query result from the specified database based on the query parameters provided. See :func:`~cloudant.database.CouchDatabase.get_query_result` method for further details. :param str partition_key: Partition key. :param str selector: Dictionary object describing criteria used to select documents. :param list fields: A list of fields to be returned by the query. :param bool raw_result: Dictates whether the query result is returned wrapped in a QueryResult or if the response JSON is returned. Defaults to False. :param kwargs: See :func:`~cloudant.database.CouchDatabase.get_query_result` method for available keyword arguments. :returns: The result content either wrapped in a QueryResult or as the raw response JSON content. :rtype: QueryResult, dict """ query = Query(self, selector=selector, fields=fields, partition_key=partition_key) return self._get_query_result(query, raw_result, **kwargs)
python
def get_partitioned_query_result(self, partition_key, selector, fields=None, raw_result=False, **kwargs): """ Retrieves the partitioned query result from the specified database based on the query parameters provided. See :func:`~cloudant.database.CouchDatabase.get_query_result` method for further details. :param str partition_key: Partition key. :param str selector: Dictionary object describing criteria used to select documents. :param list fields: A list of fields to be returned by the query. :param bool raw_result: Dictates whether the query result is returned wrapped in a QueryResult or if the response JSON is returned. Defaults to False. :param kwargs: See :func:`~cloudant.database.CouchDatabase.get_query_result` method for available keyword arguments. :returns: The result content either wrapped in a QueryResult or as the raw response JSON content. :rtype: QueryResult, dict """ query = Query(self, selector=selector, fields=fields, partition_key=partition_key) return self._get_query_result(query, raw_result, **kwargs)
[ "def", "get_partitioned_query_result", "(", "self", ",", "partition_key", ",", "selector", ",", "fields", "=", "None", ",", "raw_result", "=", "False", ",", "*", "*", "kwargs", ")", ":", "query", "=", "Query", "(", "self", ",", "selector", "=", "selector",...
Retrieves the partitioned query result from the specified database based on the query parameters provided. See :func:`~cloudant.database.CouchDatabase.get_query_result` method for further details. :param str partition_key: Partition key. :param str selector: Dictionary object describing criteria used to select documents. :param list fields: A list of fields to be returned by the query. :param bool raw_result: Dictates whether the query result is returned wrapped in a QueryResult or if the response JSON is returned. Defaults to False. :param kwargs: See :func:`~cloudant.database.CouchDatabase.get_query_result` method for available keyword arguments. :returns: The result content either wrapped in a QueryResult or as the raw response JSON content. :rtype: QueryResult, dict
[ "Retrieves", "the", "partitioned", "query", "result", "from", "the", "specified", "database", "based", "on", "the", "query", "parameters", "provided", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1170-L1198
train
206,401
cloudant/python-cloudant
src/cloudant/database.py
CouchDatabase._get_query_result
def _get_query_result(query, raw_result, **kwargs): """ Get query results helper. """ if raw_result: return query(**kwargs) if kwargs: return QueryResult(query, **kwargs) return query.result
python
def _get_query_result(query, raw_result, **kwargs): """ Get query results helper. """ if raw_result: return query(**kwargs) if kwargs: return QueryResult(query, **kwargs) return query.result
[ "def", "_get_query_result", "(", "query", ",", "raw_result", ",", "*", "*", "kwargs", ")", ":", "if", "raw_result", ":", "return", "query", "(", "*", "*", "kwargs", ")", "if", "kwargs", ":", "return", "QueryResult", "(", "query", ",", "*", "*", "kwargs...
Get query results helper.
[ "Get", "query", "results", "helper", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1276-L1283
train
206,402
cloudant/python-cloudant
src/cloudant/database.py
CloudantDatabase.shards
def shards(self): """ Retrieves information about the shards in the current remote database. :returns: Shard information retrieval status in JSON format """ url = '/'.join((self.database_url, '_shards')) resp = self.r_session.get(url) resp.raise_for_status() return response_to_json_dict(resp)
python
def shards(self): """ Retrieves information about the shards in the current remote database. :returns: Shard information retrieval status in JSON format """ url = '/'.join((self.database_url, '_shards')) resp = self.r_session.get(url) resp.raise_for_status() return response_to_json_dict(resp)
[ "def", "shards", "(", "self", ")", ":", "url", "=", "'/'", ".", "join", "(", "(", "self", ".", "database_url", ",", "'_shards'", ")", ")", "resp", "=", "self", ".", "r_session", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")"...
Retrieves information about the shards in the current remote database. :returns: Shard information retrieval status in JSON format
[ "Retrieves", "information", "about", "the", "shards", "in", "the", "current", "remote", "database", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1403-L1413
train
206,403
cloudant/python-cloudant
src/cloudant/database.py
CloudantDatabase.get_partitioned_search_result
def get_partitioned_search_result(self, partition_key, ddoc_id, index_name, **query_params): """ Retrieves the raw JSON content from the remote database based on the partitioned search index on the server, using the query_params provided as query parameters. See :func:`~cloudant.database.CouchDatabase.get_search_result` method for further details. :param str partition_key: Partition key. :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param query_params: See :func:`~cloudant.database.CloudantDatabase.get_search_result` method for available keyword arguments. :returns: Search query result data in JSON format. :rtype: dict """ ddoc = DesignDocument(self, ddoc_id) return self._get_search_result( '/'.join(( ddoc.document_partition_url(partition_key), '_search', index_name )), **query_params )
python
def get_partitioned_search_result(self, partition_key, ddoc_id, index_name, **query_params): """ Retrieves the raw JSON content from the remote database based on the partitioned search index on the server, using the query_params provided as query parameters. See :func:`~cloudant.database.CouchDatabase.get_search_result` method for further details. :param str partition_key: Partition key. :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param query_params: See :func:`~cloudant.database.CloudantDatabase.get_search_result` method for available keyword arguments. :returns: Search query result data in JSON format. :rtype: dict """ ddoc = DesignDocument(self, ddoc_id) return self._get_search_result( '/'.join(( ddoc.document_partition_url(partition_key), '_search', index_name )), **query_params )
[ "def", "get_partitioned_search_result", "(", "self", ",", "partition_key", ",", "ddoc_id", ",", "index_name", ",", "*", "*", "query_params", ")", ":", "ddoc", "=", "DesignDocument", "(", "self", ",", "ddoc_id", ")", "return", "self", ".", "_get_search_result", ...
Retrieves the raw JSON content from the remote database based on the partitioned search index on the server, using the query_params provided as query parameters. See :func:`~cloudant.database.CouchDatabase.get_search_result` method for further details. :param str partition_key: Partition key. :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param query_params: See :func:`~cloudant.database.CloudantDatabase.get_search_result` method for available keyword arguments. :returns: Search query result data in JSON format. :rtype: dict
[ "Retrieves", "the", "raw", "JSON", "content", "from", "the", "remote", "database", "based", "on", "the", "partitioned", "search", "index", "on", "the", "server", "using", "the", "query_params", "provided", "as", "query", "parameters", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1415-L1443
train
206,404
cloudant/python-cloudant
src/cloudant/database.py
CloudantDatabase.get_search_result
def get_search_result(self, ddoc_id, index_name, **query_params): """ Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. 
It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. 
:param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. :returns: Search query result data in JSON format """ ddoc = DesignDocument(self, ddoc_id) return self._get_search_result( '/'.join((ddoc.document_url, '_search', index_name)), **query_params )
python
def get_search_result(self, ddoc_id, index_name, **query_params): """ Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. 
It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. 
:param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. :returns: Search query result data in JSON format """ ddoc = DesignDocument(self, ddoc_id) return self._get_search_result( '/'.join((ddoc.document_url, '_search', index_name)), **query_params )
[ "def", "get_search_result", "(", "self", ",", "ddoc_id", ",", "index_name", ",", "*", "*", "query_params", ")", ":", "ddoc", "=", "DesignDocument", "(", "self", ",", "ddoc_id", ")", "return", "self", ".", "_get_search_result", "(", "'/'", ".", "join", "(",...
Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. 
It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. 
:param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. :returns: Search query result data in JSON format
[ "Retrieves", "the", "raw", "JSON", "content", "from", "the", "remote", "database", "based", "on", "the", "search", "index", "on", "the", "server", "using", "the", "query_params", "provided", "as", "query", "parameters", ".", "A", "query", "parameter", "contain...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1445-L1549
train
206,405
cloudant/python-cloudant
src/cloudant/database.py
CloudantDatabase._get_search_result
def _get_search_result(self, query_url, **query_params): """ Get search results helper. """ param_q = query_params.get('q') param_query = query_params.get('query') # Either q or query parameter is required if bool(param_q) == bool(param_query): raise CloudantArgumentError(104, query_params) # Validate query arguments and values for key, val in iteritems_(query_params): if key not in list(SEARCH_INDEX_ARGS.keys()): raise CloudantArgumentError(105, key) if not isinstance(val, SEARCH_INDEX_ARGS[key]): raise CloudantArgumentError(106, key, SEARCH_INDEX_ARGS[key]) # Execute query search headers = {'Content-Type': 'application/json'} resp = self.r_session.post( query_url, headers=headers, data=json.dumps(query_params, cls=self.client.encoder) ) resp.raise_for_status() return response_to_json_dict(resp)
python
def _get_search_result(self, query_url, **query_params): """ Get search results helper. """ param_q = query_params.get('q') param_query = query_params.get('query') # Either q or query parameter is required if bool(param_q) == bool(param_query): raise CloudantArgumentError(104, query_params) # Validate query arguments and values for key, val in iteritems_(query_params): if key not in list(SEARCH_INDEX_ARGS.keys()): raise CloudantArgumentError(105, key) if not isinstance(val, SEARCH_INDEX_ARGS[key]): raise CloudantArgumentError(106, key, SEARCH_INDEX_ARGS[key]) # Execute query search headers = {'Content-Type': 'application/json'} resp = self.r_session.post( query_url, headers=headers, data=json.dumps(query_params, cls=self.client.encoder) ) resp.raise_for_status() return response_to_json_dict(resp)
[ "def", "_get_search_result", "(", "self", ",", "query_url", ",", "*", "*", "query_params", ")", ":", "param_q", "=", "query_params", ".", "get", "(", "'q'", ")", "param_query", "=", "query_params", ".", "get", "(", "'query'", ")", "# Either q or query paramete...
Get search results helper.
[ "Get", "search", "results", "helper", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1551-L1573
train
206,406
cloudant/python-cloudant
src/cloudant/replicator.py
Replicator.create_replication
def create_replication(self, source_db=None, target_db=None, repl_id=None, **kwargs): """ Creates a new replication task. :param source_db: Database object to replicate from. Can be either a ``CouchDatabase`` or ``CloudantDatabase`` instance. :param target_db: Database object to replicate to. Can be either a ``CouchDatabase`` or ``CloudantDatabase`` instance. :param str repl_id: Optional replication id. Generated internally if not explicitly set. :param dict user_ctx: Optional user to act as. Composed internally if not explicitly set. :param bool create_target: Specifies whether or not to create the target, if it does not already exist. :param bool continuous: If set to True then the replication will be continuous. :returns: Replication document as a Document instance """ if source_db is None: raise CloudantReplicatorException(101) if target_db is None: raise CloudantReplicatorException(102) data = dict( _id=repl_id if repl_id else str(uuid.uuid4()), **kwargs ) # replication source data['source'] = {'url': source_db.database_url} if source_db.admin_party: pass # no credentials required elif source_db.client.is_iam_authenticated: data['source'].update({'auth': { 'iam': {'api_key': source_db.client.r_session.get_api_key} }}) else: data['source'].update({'headers': { 'Authorization': source_db.creds['basic_auth'] }}) # replication target data['target'] = {'url': target_db.database_url} if target_db.admin_party: pass # no credentials required elif target_db.client.is_iam_authenticated: data['target'].update({'auth': { 'iam': {'api_key': target_db.client.r_session.get_api_key} }}) else: data['target'].update({'headers': { 'Authorization': target_db.creds['basic_auth'] }}) # add user context delegation if not data.get('user_ctx') and self.database.creds and \ self.database.creds.get('user_ctx'): data['user_ctx'] = self.database.creds['user_ctx'] return self.database.create_document(data, throw_on_exists=True)
python
def create_replication(self, source_db=None, target_db=None, repl_id=None, **kwargs): """ Creates a new replication task. :param source_db: Database object to replicate from. Can be either a ``CouchDatabase`` or ``CloudantDatabase`` instance. :param target_db: Database object to replicate to. Can be either a ``CouchDatabase`` or ``CloudantDatabase`` instance. :param str repl_id: Optional replication id. Generated internally if not explicitly set. :param dict user_ctx: Optional user to act as. Composed internally if not explicitly set. :param bool create_target: Specifies whether or not to create the target, if it does not already exist. :param bool continuous: If set to True then the replication will be continuous. :returns: Replication document as a Document instance """ if source_db is None: raise CloudantReplicatorException(101) if target_db is None: raise CloudantReplicatorException(102) data = dict( _id=repl_id if repl_id else str(uuid.uuid4()), **kwargs ) # replication source data['source'] = {'url': source_db.database_url} if source_db.admin_party: pass # no credentials required elif source_db.client.is_iam_authenticated: data['source'].update({'auth': { 'iam': {'api_key': source_db.client.r_session.get_api_key} }}) else: data['source'].update({'headers': { 'Authorization': source_db.creds['basic_auth'] }}) # replication target data['target'] = {'url': target_db.database_url} if target_db.admin_party: pass # no credentials required elif target_db.client.is_iam_authenticated: data['target'].update({'auth': { 'iam': {'api_key': target_db.client.r_session.get_api_key} }}) else: data['target'].update({'headers': { 'Authorization': target_db.creds['basic_auth'] }}) # add user context delegation if not data.get('user_ctx') and self.database.creds and \ self.database.creds.get('user_ctx'): data['user_ctx'] = self.database.creds['user_ctx'] return self.database.create_document(data, throw_on_exists=True)
[ "def", "create_replication", "(", "self", ",", "source_db", "=", "None", ",", "target_db", "=", "None", ",", "repl_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "source_db", "is", "None", ":", "raise", "CloudantReplicatorException", "(", "101"...
Creates a new replication task. :param source_db: Database object to replicate from. Can be either a ``CouchDatabase`` or ``CloudantDatabase`` instance. :param target_db: Database object to replicate to. Can be either a ``CouchDatabase`` or ``CloudantDatabase`` instance. :param str repl_id: Optional replication id. Generated internally if not explicitly set. :param dict user_ctx: Optional user to act as. Composed internally if not explicitly set. :param bool create_target: Specifies whether or not to create the target, if it does not already exist. :param bool continuous: If set to True then the replication will be continuous. :returns: Replication document as a Document instance
[ "Creates", "a", "new", "replication", "task", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/replicator.py#L46-L111
train
206,407
cloudant/python-cloudant
src/cloudant/replicator.py
Replicator.list_replications
def list_replications(self): """ Retrieves all replication documents from the replication database. :returns: List containing replication Document objects """ docs = self.database.all_docs(include_docs=True)['rows'] documents = [] for doc in docs: if doc['id'].startswith('_design/'): continue document = Document(self.database, doc['id']) document.update(doc['doc']) documents.append(document) return documents
python
def list_replications(self): """ Retrieves all replication documents from the replication database. :returns: List containing replication Document objects """ docs = self.database.all_docs(include_docs=True)['rows'] documents = [] for doc in docs: if doc['id'].startswith('_design/'): continue document = Document(self.database, doc['id']) document.update(doc['doc']) documents.append(document) return documents
[ "def", "list_replications", "(", "self", ")", ":", "docs", "=", "self", ".", "database", ".", "all_docs", "(", "include_docs", "=", "True", ")", "[", "'rows'", "]", "documents", "=", "[", "]", "for", "doc", "in", "docs", ":", "if", "doc", "[", "'id'"...
Retrieves all replication documents from the replication database. :returns: List containing replication Document objects
[ "Retrieves", "all", "replication", "documents", "from", "the", "replication", "database", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/replicator.py#L113-L127
train
206,408
cloudant/python-cloudant
src/cloudant/replicator.py
Replicator.follow_replication
def follow_replication(self, repl_id): """ Blocks and streams status of a given replication. For example: .. code-block:: python for doc in replicator.follow_replication(repl_doc_id): # Process replication information as it comes in :param str repl_id: Replication id used to identify the replication to inspect. :returns: Iterable stream of copies of the replication Document and replication state as a ``str`` for the specified replication id """ def update_state(): """ Retrieves the replication state. """ if "scheduler" in self.client.features(): try: arepl_doc = Scheduler(self.client).get_doc(repl_id) return arepl_doc, arepl_doc['state'] except HTTPError: return None, None else: try: arepl_doc = self.database[repl_id] arepl_doc.fetch() return arepl_doc, arepl_doc.get('_replication_state') except KeyError: return None, None while True: # Make sure we fetch the state up front, just in case it moves # too fast and we miss it in the changes feed. repl_doc, state = update_state() if repl_doc: yield repl_doc if state is not None and state in ['error', 'completed']: return # Now listen on changes feed for the state for change in self.database.changes(): if change.get('id') == repl_id: repl_doc, state = update_state() if repl_doc is not None: yield repl_doc if state is not None and state in ['error', 'completed']: return
python
def follow_replication(self, repl_id): """ Blocks and streams status of a given replication. For example: .. code-block:: python for doc in replicator.follow_replication(repl_doc_id): # Process replication information as it comes in :param str repl_id: Replication id used to identify the replication to inspect. :returns: Iterable stream of copies of the replication Document and replication state as a ``str`` for the specified replication id """ def update_state(): """ Retrieves the replication state. """ if "scheduler" in self.client.features(): try: arepl_doc = Scheduler(self.client).get_doc(repl_id) return arepl_doc, arepl_doc['state'] except HTTPError: return None, None else: try: arepl_doc = self.database[repl_id] arepl_doc.fetch() return arepl_doc, arepl_doc.get('_replication_state') except KeyError: return None, None while True: # Make sure we fetch the state up front, just in case it moves # too fast and we miss it in the changes feed. repl_doc, state = update_state() if repl_doc: yield repl_doc if state is not None and state in ['error', 'completed']: return # Now listen on changes feed for the state for change in self.database.changes(): if change.get('id') == repl_id: repl_doc, state = update_state() if repl_doc is not None: yield repl_doc if state is not None and state in ['error', 'completed']: return
[ "def", "follow_replication", "(", "self", ",", "repl_id", ")", ":", "def", "update_state", "(", ")", ":", "\"\"\"\n Retrieves the replication state.\n \"\"\"", "if", "\"scheduler\"", "in", "self", ".", "client", ".", "features", "(", ")", ":", ...
Blocks and streams status of a given replication. For example: .. code-block:: python for doc in replicator.follow_replication(repl_doc_id): # Process replication information as it comes in :param str repl_id: Replication id used to identify the replication to inspect. :returns: Iterable stream of copies of the replication Document and replication state as a ``str`` for the specified replication id
[ "Blocks", "and", "streams", "status", "of", "a", "given", "replication", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/replicator.py#L155-L206
train
206,409
cloudant/python-cloudant
src/cloudant/replicator.py
Replicator.stop_replication
def stop_replication(self, repl_id): """ Stops a replication based on the provided replication id by deleting the replication document from the replication database. The replication can only be stopped if it has not yet completed. If it has already completed then the replication document is still deleted from replication database. :param str repl_id: Replication id used to identify the replication to stop. """ try: repl_doc = self.database[repl_id] except KeyError: raise CloudantReplicatorException(404, repl_id) repl_doc.fetch() repl_doc.delete()
python
def stop_replication(self, repl_id): """ Stops a replication based on the provided replication id by deleting the replication document from the replication database. The replication can only be stopped if it has not yet completed. If it has already completed then the replication document is still deleted from replication database. :param str repl_id: Replication id used to identify the replication to stop. """ try: repl_doc = self.database[repl_id] except KeyError: raise CloudantReplicatorException(404, repl_id) repl_doc.fetch() repl_doc.delete()
[ "def", "stop_replication", "(", "self", ",", "repl_id", ")", ":", "try", ":", "repl_doc", "=", "self", ".", "database", "[", "repl_id", "]", "except", "KeyError", ":", "raise", "CloudantReplicatorException", "(", "404", ",", "repl_id", ")", "repl_doc", ".", ...
Stops a replication based on the provided replication id by deleting the replication document from the replication database. The replication can only be stopped if it has not yet completed. If it has already completed then the replication document is still deleted from replication database. :param str repl_id: Replication id used to identify the replication to stop.
[ "Stops", "a", "replication", "based", "on", "the", "provided", "replication", "id", "by", "deleting", "the", "replication", "document", "from", "the", "replication", "database", ".", "The", "replication", "can", "only", "be", "stopped", "if", "it", "has", "not...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/replicator.py#L208-L226
train
206,410
cloudant/python-cloudant
src/cloudant/_client_session.py
ClientSession.base64_user_pass
def base64_user_pass(self): """ Composes a basic http auth string, suitable for use with the _replicator database, and other places that need it. :returns: Basic http authentication string """ if self._username is None or self._password is None: return None hash_ = base64.urlsafe_b64encode(bytes_("{username}:{password}".format( username=self._username, password=self._password ))) return "Basic {0}".format(unicode_(hash_))
python
def base64_user_pass(self): """ Composes a basic http auth string, suitable for use with the _replicator database, and other places that need it. :returns: Basic http authentication string """ if self._username is None or self._password is None: return None hash_ = base64.urlsafe_b64encode(bytes_("{username}:{password}".format( username=self._username, password=self._password ))) return "Basic {0}".format(unicode_(hash_))
[ "def", "base64_user_pass", "(", "self", ")", ":", "if", "self", ".", "_username", "is", "None", "or", "self", ".", "_password", "is", "None", ":", "return", "None", "hash_", "=", "base64", ".", "urlsafe_b64encode", "(", "bytes_", "(", "\"{username}:{password...
Composes a basic http auth string, suitable for use with the _replicator database, and other places that need it. :returns: Basic http authentication string
[ "Composes", "a", "basic", "http", "auth", "string", "suitable", "for", "use", "with", "the", "_replicator", "database", "and", "other", "places", "that", "need", "it", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L44-L58
train
206,411
cloudant/python-cloudant
src/cloudant/_client_session.py
ClientSession.request
def request(self, method, url, **kwargs): """ Overrides ``requests.Session.request`` to set the timeout. """ resp = super(ClientSession, self).request( method, url, timeout=self._timeout, **kwargs) return resp
python
def request(self, method, url, **kwargs): """ Overrides ``requests.Session.request`` to set the timeout. """ resp = super(ClientSession, self).request( method, url, timeout=self._timeout, **kwargs) return resp
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "resp", "=", "super", "(", "ClientSession", ",", "self", ")", ".", "request", "(", "method", ",", "url", ",", "timeout", "=", "self", ".", "_timeout", ",",...
Overrides ``requests.Session.request`` to set the timeout.
[ "Overrides", "requests", ".", "Session", ".", "request", "to", "set", "the", "timeout", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L61-L68
train
206,412
cloudant/python-cloudant
src/cloudant/_client_session.py
ClientSession.info
def info(self): """ Get session information. """ if self._session_url is None: return None resp = self.get(self._session_url) resp.raise_for_status() return response_to_json_dict(resp)
python
def info(self): """ Get session information. """ if self._session_url is None: return None resp = self.get(self._session_url) resp.raise_for_status() return response_to_json_dict(resp)
[ "def", "info", "(", "self", ")", ":", "if", "self", ".", "_session_url", "is", "None", ":", "return", "None", "resp", "=", "self", ".", "get", "(", "self", ".", "_session_url", ")", "resp", ".", "raise_for_status", "(", ")", "return", "response_to_json_d...
Get session information.
[ "Get", "session", "information", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L70-L79
train
206,413
cloudant/python-cloudant
src/cloudant/_client_session.py
ClientSession.set_credentials
def set_credentials(self, username, password): """ Set a new username and password. :param str username: New username. :param str password: New password. """ if username is not None: self._username = username if password is not None: self._password = password
python
def set_credentials(self, username, password): """ Set a new username and password. :param str username: New username. :param str password: New password. """ if username is not None: self._username = username if password is not None: self._password = password
[ "def", "set_credentials", "(", "self", ",", "username", ",", "password", ")", ":", "if", "username", "is", "not", "None", ":", "self", ".", "_username", "=", "username", "if", "password", "is", "not", "None", ":", "self", ".", "_password", "=", "password...
Set a new username and password. :param str username: New username. :param str password: New password.
[ "Set", "a", "new", "username", "and", "password", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L81-L92
train
206,414
cloudant/python-cloudant
src/cloudant/_client_session.py
BasicSession.request
def request(self, method, url, **kwargs): """ Overrides ``requests.Session.request`` to provide basic access authentication. """ auth = None if self._username is not None and self._password is not None: auth = (self._username, self._password) return super(BasicSession, self).request( method, url, auth=auth, **kwargs)
python
def request(self, method, url, **kwargs): """ Overrides ``requests.Session.request`` to provide basic access authentication. """ auth = None if self._username is not None and self._password is not None: auth = (self._username, self._password) return super(BasicSession, self).request( method, url, auth=auth, **kwargs)
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "auth", "=", "None", "if", "self", ".", "_username", "is", "not", "None", "and", "self", ".", "_password", "is", "not", "None", ":", "auth", "=", "(", "s...
Overrides ``requests.Session.request`` to provide basic access authentication.
[ "Overrides", "requests", ".", "Session", ".", "request", "to", "provide", "basic", "access", "authentication", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L121-L131
train
206,415
cloudant/python-cloudant
src/cloudant/_client_session.py
CookieSession.login
def login(self): """ Perform cookie based user login. """ resp = super(CookieSession, self).request( 'POST', self._session_url, data={'name': self._username, 'password': self._password}, ) resp.raise_for_status()
python
def login(self): """ Perform cookie based user login. """ resp = super(CookieSession, self).request( 'POST', self._session_url, data={'name': self._username, 'password': self._password}, ) resp.raise_for_status()
[ "def", "login", "(", "self", ")", ":", "resp", "=", "super", "(", "CookieSession", ",", "self", ")", ".", "request", "(", "'POST'", ",", "self", ".", "_session_url", ",", "data", "=", "{", "'name'", ":", "self", ".", "_username", ",", "'password'", "...
Perform cookie based user login.
[ "Perform", "cookie", "based", "user", "login", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L146-L155
train
206,416
cloudant/python-cloudant
src/cloudant/_client_session.py
CookieSession.logout
def logout(self): """ Logout cookie based user. """ resp = super(CookieSession, self).request('DELETE', self._session_url) resp.raise_for_status()
python
def logout(self): """ Logout cookie based user. """ resp = super(CookieSession, self).request('DELETE', self._session_url) resp.raise_for_status()
[ "def", "logout", "(", "self", ")", ":", "resp", "=", "super", "(", "CookieSession", ",", "self", ")", ".", "request", "(", "'DELETE'", ",", "self", ".", "_session_url", ")", "resp", ".", "raise_for_status", "(", ")" ]
Logout cookie based user.
[ "Logout", "cookie", "based", "user", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L157-L162
train
206,417
cloudant/python-cloudant
src/cloudant/_client_session.py
IAMSession.login
def login(self): """ Perform IAM cookie based user login. """ access_token = self._get_access_token() try: super(IAMSession, self).request( 'POST', self._session_url, headers={'Content-Type': 'application/json'}, data=json.dumps({'access_token': access_token}) ).raise_for_status() except RequestException: raise CloudantException( 'Failed to exchange IAM token with Cloudant')
python
def login(self): """ Perform IAM cookie based user login. """ access_token = self._get_access_token() try: super(IAMSession, self).request( 'POST', self._session_url, headers={'Content-Type': 'application/json'}, data=json.dumps({'access_token': access_token}) ).raise_for_status() except RequestException: raise CloudantException( 'Failed to exchange IAM token with Cloudant')
[ "def", "login", "(", "self", ")", ":", "access_token", "=", "self", ".", "_get_access_token", "(", ")", "try", ":", "super", "(", "IAMSession", ",", "self", ")", ".", "request", "(", "'POST'", ",", "self", ".", "_session_url", ",", "headers", "=", "{",...
Perform IAM cookie based user login.
[ "Perform", "IAM", "cookie", "based", "user", "login", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L214-L229
train
206,418
cloudant/python-cloudant
src/cloudant/_client_session.py
IAMSession._get_access_token
def _get_access_token(self): """ Get IAM access token using API key. """ err = 'Failed to contact IAM token service' try: resp = super(IAMSession, self).request( 'POST', self._token_url, auth=self._token_auth, headers={'Accepts': 'application/json'}, data={ 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'response_type': 'cloud_iam', 'apikey': self._api_key } ) err = response_to_json_dict(resp).get('errorMessage', err) resp.raise_for_status() return response_to_json_dict(resp)['access_token'] except KeyError: raise CloudantException('Invalid response from IAM token service') except RequestException: raise CloudantException(err)
python
def _get_access_token(self): """ Get IAM access token using API key. """ err = 'Failed to contact IAM token service' try: resp = super(IAMSession, self).request( 'POST', self._token_url, auth=self._token_auth, headers={'Accepts': 'application/json'}, data={ 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'response_type': 'cloud_iam', 'apikey': self._api_key } ) err = response_to_json_dict(resp).get('errorMessage', err) resp.raise_for_status() return response_to_json_dict(resp)['access_token'] except KeyError: raise CloudantException('Invalid response from IAM token service') except RequestException: raise CloudantException(err)
[ "def", "_get_access_token", "(", "self", ")", ":", "err", "=", "'Failed to contact IAM token service'", "try", ":", "resp", "=", "super", "(", "IAMSession", ",", "self", ")", ".", "request", "(", "'POST'", ",", "self", ".", "_token_url", ",", "auth", "=", ...
Get IAM access token using API key.
[ "Get", "IAM", "access", "token", "using", "API", "key", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_client_session.py#L275-L301
train
206,419
cloudant/python-cloudant
src/cloudant/document.py
Document.document_url
def document_url(self): """ Constructs and returns the document URL. :returns: Document URL """ if '_id' not in self or self['_id'] is None: return None # handle design document url if self['_id'].startswith('_design/'): return '/'.join(( self._database_host, url_quote_plus(self._database_name), '_design', url_quote(self['_id'][8:], safe='') )) # handle document url return '/'.join(( self._database_host, url_quote_plus(self._database_name), url_quote(self['_id'], safe='') ))
python
def document_url(self): """ Constructs and returns the document URL. :returns: Document URL """ if '_id' not in self or self['_id'] is None: return None # handle design document url if self['_id'].startswith('_design/'): return '/'.join(( self._database_host, url_quote_plus(self._database_name), '_design', url_quote(self['_id'][8:], safe='') )) # handle document url return '/'.join(( self._database_host, url_quote_plus(self._database_name), url_quote(self['_id'], safe='') ))
[ "def", "document_url", "(", "self", ")", ":", "if", "'_id'", "not", "in", "self", "or", "self", "[", "'_id'", "]", "is", "None", ":", "return", "None", "# handle design document url", "if", "self", "[", "'_id'", "]", ".", "startswith", "(", "'_design/'", ...
Constructs and returns the document URL. :returns: Document URL
[ "Constructs", "and", "returns", "the", "document", "URL", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L81-L104
train
206,420
cloudant/python-cloudant
src/cloudant/document.py
Document.exists
def exists(self): """ Retrieves whether the document exists in the remote database or not. :returns: True if the document exists in the remote database, otherwise False """ if '_id' not in self or self['_id'] is None: return False resp = self.r_session.head(self.document_url) if resp.status_code not in [200, 404]: resp.raise_for_status() return resp.status_code == 200
python
def exists(self): """ Retrieves whether the document exists in the remote database or not. :returns: True if the document exists in the remote database, otherwise False """ if '_id' not in self or self['_id'] is None: return False resp = self.r_session.head(self.document_url) if resp.status_code not in [200, 404]: resp.raise_for_status() return resp.status_code == 200
[ "def", "exists", "(", "self", ")", ":", "if", "'_id'", "not", "in", "self", "or", "self", "[", "'_id'", "]", "is", "None", ":", "return", "False", "resp", "=", "self", ".", "r_session", ".", "head", "(", "self", ".", "document_url", ")", "if", "res...
Retrieves whether the document exists in the remote database or not. :returns: True if the document exists in the remote database, otherwise False
[ "Retrieves", "whether", "the", "document", "exists", "in", "the", "remote", "database", "or", "not", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L106-L120
train
206,421
cloudant/python-cloudant
src/cloudant/document.py
Document.create
def create(self): """ Creates the current document in the remote database and if successful, updates the locally cached Document object with the ``_id`` and ``_rev`` returned as part of the successful response. """ # Ensure that an existing document will not be "updated" doc = dict(self) if doc.get('_rev') is not None: doc.__delitem__('_rev') headers = {'Content-Type': 'application/json'} resp = self.r_session.post( self._database.database_url, headers=headers, data=json.dumps(doc, cls=self.encoder) ) resp.raise_for_status() data = response_to_json_dict(resp) super(Document, self).__setitem__('_id', data['id']) super(Document, self).__setitem__('_rev', data['rev'])
python
def create(self): """ Creates the current document in the remote database and if successful, updates the locally cached Document object with the ``_id`` and ``_rev`` returned as part of the successful response. """ # Ensure that an existing document will not be "updated" doc = dict(self) if doc.get('_rev') is not None: doc.__delitem__('_rev') headers = {'Content-Type': 'application/json'} resp = self.r_session.post( self._database.database_url, headers=headers, data=json.dumps(doc, cls=self.encoder) ) resp.raise_for_status() data = response_to_json_dict(resp) super(Document, self).__setitem__('_id', data['id']) super(Document, self).__setitem__('_rev', data['rev'])
[ "def", "create", "(", "self", ")", ":", "# Ensure that an existing document will not be \"updated\"", "doc", "=", "dict", "(", "self", ")", "if", "doc", ".", "get", "(", "'_rev'", ")", "is", "not", "None", ":", "doc", ".", "__delitem__", "(", "'_rev'", ")", ...
Creates the current document in the remote database and if successful, updates the locally cached Document object with the ``_id`` and ``_rev`` returned as part of the successful response.
[ "Creates", "the", "current", "document", "in", "the", "remote", "database", "and", "if", "successful", "updates", "the", "locally", "cached", "Document", "object", "with", "the", "_id", "and", "_rev", "returned", "as", "part", "of", "the", "successful", "respo...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L132-L153
train
206,422
cloudant/python-cloudant
src/cloudant/document.py
Document.fetch
def fetch(self): """ Retrieves the content of the current document from the remote database and populates the locally cached Document object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached Document object. """ if self.document_url is None: raise CloudantDocumentException(101) resp = self.r_session.get(self.document_url) resp.raise_for_status() self.clear() self.update(response_to_json_dict(resp, cls=self.decoder))
python
def fetch(self): """ Retrieves the content of the current document from the remote database and populates the locally cached Document object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached Document object. """ if self.document_url is None: raise CloudantDocumentException(101) resp = self.r_session.get(self.document_url) resp.raise_for_status() self.clear() self.update(response_to_json_dict(resp, cls=self.decoder))
[ "def", "fetch", "(", "self", ")", ":", "if", "self", ".", "document_url", "is", "None", ":", "raise", "CloudantDocumentException", "(", "101", ")", "resp", "=", "self", ".", "r_session", ".", "get", "(", "self", ".", "document_url", ")", "resp", ".", "...
Retrieves the content of the current document from the remote database and populates the locally cached Document object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached Document object.
[ "Retrieves", "the", "content", "of", "the", "current", "document", "from", "the", "remote", "database", "and", "populates", "the", "locally", "cached", "Document", "object", "with", "that", "content", ".", "A", "call", "to", "fetch", "will", "overwrite", "any"...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L155-L167
train
206,423
cloudant/python-cloudant
src/cloudant/document.py
Document.save
def save(self): """ Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation. """ headers = {} headers.setdefault('Content-Type', 'application/json') if not self.exists(): self.create() return put_resp = self.r_session.put( self.document_url, data=self.json(), headers=headers ) put_resp.raise_for_status() data = response_to_json_dict(put_resp) super(Document, self).__setitem__('_rev', data['rev']) return
python
def save(self): """ Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation. """ headers = {} headers.setdefault('Content-Type', 'application/json') if not self.exists(): self.create() return put_resp = self.r_session.put( self.document_url, data=self.json(), headers=headers ) put_resp.raise_for_status() data = response_to_json_dict(put_resp) super(Document, self).__setitem__('_rev', data['rev']) return
[ "def", "save", "(", "self", ")", ":", "headers", "=", "{", "}", "headers", ".", "setdefault", "(", "'Content-Type'", ",", "'application/json'", ")", "if", "not", "self", ".", "exists", "(", ")", ":", "self", ".", "create", "(", ")", "return", "put_resp...
Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation.
[ "Saves", "changes", "made", "to", "the", "locally", "cached", "Document", "object", "s", "data", "structures", "to", "the", "remote", "database", ".", "If", "the", "document", "does", "not", "exist", "remotely", "then", "it", "is", "created", "in", "the", ...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L169-L191
train
206,424
cloudant/python-cloudant
src/cloudant/document.py
Document.list_field_append
def list_field_append(doc, field, value):
    """
    Appends a value to a list field in a locally cached Document object,
    creating the field first when it does not yet exist.

    :param Document doc: Locally cached Document object that can be a
        Document, DesignDocument or dict.
    :param str field: Name of the field list to append to.
    :param value: Value to append to the field list.
    """
    current = doc.get(field)
    if current is None:
        current = doc[field] = []
    if not isinstance(current, list):
        raise CloudantDocumentException(102, field)
    if value is not None:
        current.append(value)
python
def list_field_append(doc, field, value): """ Appends a value to a list field in a locally cached Document object. If a field does not exist it will be created first. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field list to append to. :param value: Value to append to the field list. """ if doc.get(field) is None: doc[field] = [] if not isinstance(doc[field], list): raise CloudantDocumentException(102, field) if value is not None: doc[field].append(value)
[ "def", "list_field_append", "(", "doc", ",", "field", ",", "value", ")", ":", "if", "doc", ".", "get", "(", "field", ")", "is", "None", ":", "doc", "[", "field", "]", "=", "[", "]", "if", "not", "isinstance", "(", "doc", "[", "field", "]", ",", ...
Appends a value to a list field in a locally cached Document object. If a field does not exist it will be created first. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field list to append to. :param value: Value to append to the field list.
[ "Appends", "a", "value", "to", "a", "list", "field", "in", "a", "locally", "cached", "Document", "object", ".", "If", "a", "field", "does", "not", "exist", "it", "will", "be", "created", "first", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L196-L211
train
206,425
cloudant/python-cloudant
src/cloudant/document.py
Document.list_field_remove
def list_field_remove(doc, field, value):
    """
    Removes a value from a list field in a locally cached Document object.

    :param Document doc: Locally cached Document object that can be a
        Document, DesignDocument or dict.
    :param str field: Name of the field list to remove from.
    :param value: Value to remove from the field list.
    """
    target = doc[field]
    if not isinstance(target, list):
        raise CloudantDocumentException(102, field)
    target.remove(value)
python
def list_field_remove(doc, field, value): """ Removes a value from a list field in a locally cached Document object. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field list to remove from. :param value: Value to remove from the field list. """ if not isinstance(doc[field], list): raise CloudantDocumentException(102, field) doc[field].remove(value)
[ "def", "list_field_remove", "(", "doc", ",", "field", ",", "value", ")", ":", "if", "not", "isinstance", "(", "doc", "[", "field", "]", ",", "list", ")", ":", "raise", "CloudantDocumentException", "(", "102", ",", "field", ")", "doc", "[", "field", "]"...
Removes a value from a list field in a locally cached Document object. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field list to remove from. :param value: Value to remove from the field list.
[ "Removes", "a", "value", "from", "a", "list", "field", "in", "a", "locally", "cached", "Document", "object", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L214-L225
train
206,426
cloudant/python-cloudant
src/cloudant/document.py
Document.field_set
def field_set(doc, field, value):
    """
    Sets or replaces a value for a field in a locally cached Document
    object.  To remove the field set the ``value`` to None.

    :param Document doc: Locally cached Document object that can be a
        Document, DesignDocument or dict.
    :param str field: Name of the field to set.
    :param value: Value to set the field to.
    """
    if value is not None:
        doc[field] = value
    else:
        # None means "remove": delete the key entirely.
        del doc[field]
python
def field_set(doc, field, value): """ Sets or replaces a value for a field in a locally cached Document object. To remove the field set the ``value`` to None. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field to set. :param value: Value to set the field to. """ if value is None: doc.__delitem__(field) else: doc[field] = value
[ "def", "field_set", "(", "doc", ",", "field", ",", "value", ")", ":", "if", "value", "is", "None", ":", "doc", ".", "__delitem__", "(", "field", ")", "else", ":", "doc", "[", "field", "]", "=", "value" ]
Sets or replaces a value for a field in a locally cached Document object. To remove the field set the ``value`` to None. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field to set. :param value: Value to set the field to.
[ "Sets", "or", "replaces", "a", "value", "for", "a", "field", "in", "a", "locally", "cached", "Document", "object", ".", "To", "remove", "the", "field", "set", "the", "value", "to", "None", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L228-L241
train
206,427
cloudant/python-cloudant
src/cloudant/document.py
Document._update_field
def _update_field(self, action, field, value, max_tries, tries=0): """ Private update_field method. Wrapped by Document.update_field. Tracks a "tries" var to help limit recursion. """ # Refresh our view of the document. self.fetch() # Update the field. action(self, field, value) # Attempt to save, retrying conflicts up to max_tries. try: self.save() except requests.HTTPError as ex: if tries < max_tries and ex.response.status_code == 409: self._update_field( action, field, value, max_tries, tries=tries+1) else: raise
python
def _update_field(self, action, field, value, max_tries, tries=0): """ Private update_field method. Wrapped by Document.update_field. Tracks a "tries" var to help limit recursion. """ # Refresh our view of the document. self.fetch() # Update the field. action(self, field, value) # Attempt to save, retrying conflicts up to max_tries. try: self.save() except requests.HTTPError as ex: if tries < max_tries and ex.response.status_code == 409: self._update_field( action, field, value, max_tries, tries=tries+1) else: raise
[ "def", "_update_field", "(", "self", ",", "action", ",", "field", ",", "value", ",", "max_tries", ",", "tries", "=", "0", ")", ":", "# Refresh our view of the document.", "self", ".", "fetch", "(", ")", "# Update the field.", "action", "(", "self", ",", "fie...
Private update_field method. Wrapped by Document.update_field. Tracks a "tries" var to help limit recursion.
[ "Private", "update_field", "method", ".", "Wrapped", "by", "Document", ".", "update_field", ".", "Tracks", "a", "tries", "var", "to", "help", "limit", "recursion", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L243-L262
train
206,428
cloudant/python-cloudant
src/cloudant/document.py
Document.update_field
def update_field(self, action, field, value, max_tries=10):
    """
    Updates a field in the remote document.  If a conflict exists, the
    document is re-fetched from the remote database and the update is
    retried.  This is performed up to ``max_tries`` number of times.

    Use this method when you want to update a single field in a document,
    and don't want to risk clobbering other people's changes to the
    document in other fields, but also don't want the caller to implement
    logic to deal with conflicts.

    For example:

    .. code-block:: python

        # Append the string 'foo' to the 'words' list of Document doc.
        doc.update_field(
            action=doc.list_field_append,
            field='words',
            value='foo'
        )

    :param callable action: A routine that takes a Document object,
        a field name, and a value.  The routine should attempt to
        update a field in the locally cached Document object with the
        given value, using whatever logic is appropriate.
        Valid actions are
        :func:`~cloudant.document.Document.list_field_append`,
        :func:`~cloudant.document.Document.list_field_remove`,
        :func:`~cloudant.document.Document.field_set`
    :param str field: Name of the field to update
    :param value: Value to update the field with
    :param int max_tries: In the case of a conflict, the number of retries
        to attempt
    """
    self._update_field(action, field, value, max_tries)
python
def update_field(self, action, field, value, max_tries=10): """ Updates a field in the remote document. If a conflict exists, the document is re-fetched from the remote database and the update is retried. This is performed up to ``max_tries`` number of times. Use this method when you want to update a single field in a document, and don't want to risk clobbering other people's changes to the document in other fields, but also don't want the caller to implement logic to deal with conflicts. For example: .. code-block:: python # Append the string 'foo' to the 'words' list of Document doc. doc.update_field( action=doc.list_field_append, field='words', value='foo' ) :param callable action: A routine that takes a Document object, a field name, and a value. The routine should attempt to update a field in the locally cached Document object with the given value, using whatever logic is appropriate. Valid actions are :func:`~cloudant.document.Document.list_field_append`, :func:`~cloudant.document.Document.list_field_remove`, :func:`~cloudant.document.Document.field_set` :param str field: Name of the field to update :param value: Value to update the field with :param int max_tries: In the case of a conflict, the number of retries to attempt """ self._update_field(action, field, value, max_tries)
[ "def", "update_field", "(", "self", ",", "action", ",", "field", ",", "value", ",", "max_tries", "=", "10", ")", ":", "self", ".", "_update_field", "(", "action", ",", "field", ",", "value", ",", "max_tries", ")" ]
Updates a field in the remote document. If a conflict exists, the document is re-fetched from the remote database and the update is retried. This is performed up to ``max_tries`` number of times. Use this method when you want to update a single field in a document, and don't want to risk clobbering other people's changes to the document in other fields, but also don't want the caller to implement logic to deal with conflicts. For example: .. code-block:: python # Append the string 'foo' to the 'words' list of Document doc. doc.update_field( action=doc.list_field_append, field='words', value='foo' ) :param callable action: A routine that takes a Document object, a field name, and a value. The routine should attempt to update a field in the locally cached Document object with the given value, using whatever logic is appropriate. Valid actions are :func:`~cloudant.document.Document.list_field_append`, :func:`~cloudant.document.Document.list_field_remove`, :func:`~cloudant.document.Document.field_set` :param str field: Name of the field to update :param value: Value to update the field with :param int max_tries: In the case of a conflict, the number of retries to attempt
[ "Updates", "a", "field", "in", "the", "remote", "document", ".", "If", "a", "conflict", "exists", "the", "document", "is", "re", "-", "fetched", "from", "the", "remote", "database", "and", "the", "update", "is", "retried", ".", "This", "is", "performed", ...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L264-L299
train
206,429
cloudant/python-cloudant
src/cloudant/document.py
Document.delete
def delete(self):
    """
    Removes the document from the remote database and clears the content
    of the locally cached Document object with the exception of the
    ``_id`` field.  In order to successfully remove a document from the
    remote database, a ``_rev`` value must exist in the locally cached
    Document object.

    :raises CloudantDocumentException: code 103, when no ``_rev`` value
        is present in the local cache.
    """
    if not self.get("_rev"):
        raise CloudantDocumentException(103)
    del_resp = self.r_session.delete(
        self.document_url,
        params={"rev": self["_rev"]},
    )
    del_resp.raise_for_status()
    # Keep only the _id so the object can still identify its document.
    _id = self['_id']
    self.clear()
    self['_id'] = _id
python
def delete(self): """ Removes the document from the remote database and clears the content of the locally cached Document object with the exception of the ``_id`` field. In order to successfully remove a document from the remote database, a ``_rev`` value must exist in the locally cached Document object. """ if not self.get("_rev"): raise CloudantDocumentException(103) del_resp = self.r_session.delete( self.document_url, params={"rev": self["_rev"]}, ) del_resp.raise_for_status() _id = self['_id'] self.clear() self['_id'] = _id
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "get", "(", "\"_rev\"", ")", ":", "raise", "CloudantDocumentException", "(", "103", ")", "del_resp", "=", "self", ".", "r_session", ".", "delete", "(", "self", ".", "document_url", ",", ...
Removes the document from the remote database and clears the content of the locally cached Document object with the exception of the ``_id`` field. In order to successfully remove a document from the remote database, a ``_rev`` value must exist in the locally cached Document object.
[ "Removes", "the", "document", "from", "the", "remote", "database", "and", "clears", "the", "content", "of", "the", "locally", "cached", "Document", "object", "with", "the", "exception", "of", "the", "_id", "field", ".", "In", "order", "to", "successfully", "...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L301-L319
train
206,430
cloudant/python-cloudant
src/cloudant/document.py
Document.delete_attachment
def delete_attachment(self, attachment, headers=None): """ Removes an attachment from a remote document and refreshes the locally cached document object. :param str attachment: Attachment file name used to identify the attachment. :param dict headers: Optional, additional headers to be sent with request. :returns: Attachment deletion status in JSON format """ # need latest rev self.fetch() attachment_url = '/'.join((self.document_url, attachment)) if headers is None: headers = {'If-Match': self['_rev']} else: headers['If-Match'] = self['_rev'] resp = self.r_session.delete( attachment_url, headers=headers ) resp.raise_for_status() super(Document, self).__setitem__('_rev', response_to_json_dict(resp)['rev']) # Execute logic only if attachment metadata exists locally if self.get('_attachments'): # Remove the attachment metadata for the specified attachment if self['_attachments'].get(attachment): self['_attachments'].__delitem__(attachment) # Remove empty attachment metadata from the local dictionary if not self['_attachments']: super(Document, self).__delitem__('_attachments') return response_to_json_dict(resp)
python
def delete_attachment(self, attachment, headers=None): """ Removes an attachment from a remote document and refreshes the locally cached document object. :param str attachment: Attachment file name used to identify the attachment. :param dict headers: Optional, additional headers to be sent with request. :returns: Attachment deletion status in JSON format """ # need latest rev self.fetch() attachment_url = '/'.join((self.document_url, attachment)) if headers is None: headers = {'If-Match': self['_rev']} else: headers['If-Match'] = self['_rev'] resp = self.r_session.delete( attachment_url, headers=headers ) resp.raise_for_status() super(Document, self).__setitem__('_rev', response_to_json_dict(resp)['rev']) # Execute logic only if attachment metadata exists locally if self.get('_attachments'): # Remove the attachment metadata for the specified attachment if self['_attachments'].get(attachment): self['_attachments'].__delitem__(attachment) # Remove empty attachment metadata from the local dictionary if not self['_attachments']: super(Document, self).__delitem__('_attachments') return response_to_json_dict(resp)
[ "def", "delete_attachment", "(", "self", ",", "attachment", ",", "headers", "=", "None", ")", ":", "# need latest rev", "self", ".", "fetch", "(", ")", "attachment_url", "=", "'/'", ".", "join", "(", "(", "self", ".", "document_url", ",", "attachment", ")"...
Removes an attachment from a remote document and refreshes the locally cached document object. :param str attachment: Attachment file name used to identify the attachment. :param dict headers: Optional, additional headers to be sent with request. :returns: Attachment deletion status in JSON format
[ "Removes", "an", "attachment", "from", "a", "remote", "document", "and", "refreshes", "the", "locally", "cached", "document", "object", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L407-L442
train
206,431
cloudant/python-cloudant
src/cloudant/document.py
Document.put_attachment
def put_attachment(self, attachment, content_type, data, headers=None): """ Adds a new attachment, or updates an existing attachment, to the remote document and refreshes the locally cached Document object accordingly. :param attachment: Attachment file name used to identify the attachment. :param content_type: The http ``Content-Type`` of the attachment used as an additional header. :param data: Attachment data defining the attachment content. :param headers: Optional, additional headers to be sent with request. :returns: Attachment addition/update status in JSON format """ # need latest rev self.fetch() attachment_url = '/'.join((self.document_url, attachment)) if headers is None: headers = { 'If-Match': self['_rev'], 'Content-Type': content_type } else: headers['If-Match'] = self['_rev'] headers['Content-Type'] = content_type resp = self.r_session.put( attachment_url, data=data, headers=headers ) resp.raise_for_status() self.fetch() return response_to_json_dict(resp)
python
def put_attachment(self, attachment, content_type, data, headers=None): """ Adds a new attachment, or updates an existing attachment, to the remote document and refreshes the locally cached Document object accordingly. :param attachment: Attachment file name used to identify the attachment. :param content_type: The http ``Content-Type`` of the attachment used as an additional header. :param data: Attachment data defining the attachment content. :param headers: Optional, additional headers to be sent with request. :returns: Attachment addition/update status in JSON format """ # need latest rev self.fetch() attachment_url = '/'.join((self.document_url, attachment)) if headers is None: headers = { 'If-Match': self['_rev'], 'Content-Type': content_type } else: headers['If-Match'] = self['_rev'] headers['Content-Type'] = content_type resp = self.r_session.put( attachment_url, data=data, headers=headers ) resp.raise_for_status() self.fetch() return response_to_json_dict(resp)
[ "def", "put_attachment", "(", "self", ",", "attachment", ",", "content_type", ",", "data", ",", "headers", "=", "None", ")", ":", "# need latest rev", "self", ".", "fetch", "(", ")", "attachment_url", "=", "'/'", ".", "join", "(", "(", "self", ".", "docu...
Adds a new attachment, or updates an existing attachment, to the remote document and refreshes the locally cached Document object accordingly. :param attachment: Attachment file name used to identify the attachment. :param content_type: The http ``Content-Type`` of the attachment used as an additional header. :param data: Attachment data defining the attachment content. :param headers: Optional, additional headers to be sent with request. :returns: Attachment addition/update status in JSON format
[ "Adds", "a", "new", "attachment", "or", "updates", "an", "existing", "attachment", "to", "the", "remote", "document", "and", "refreshes", "the", "locally", "cached", "Document", "object", "accordingly", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L444-L479
train
206,432
cloudant/python-cloudant
src/cloudant/_common_util.py
py_to_couch_validate
def py_to_couch_validate(key, val):
    """
    Validates the individual parameter key and value.
    """
    if key not in RESULT_ARG_TYPES:
        raise CloudantArgumentError(116, key)
    expected = RESULT_ARG_TYPES[key]
    # pylint: disable=unidiomatic-typecheck
    # bool subclasses int, so explicitly reject bools where ints are expected.
    if not isinstance(val, expected) or (type(val) is bool and int in expected):
        raise CloudantArgumentError(117, key, expected)
    if key == 'keys':
        key_types = RESULT_ARG_TYPES['key']
        for entry in val:
            if not isinstance(entry, key_types) or type(entry) is bool:
                raise CloudantArgumentError(134, key_types)
    elif key == 'stale' and val not in ('ok', 'update_after'):
        raise CloudantArgumentError(135, val)
python
def py_to_couch_validate(key, val): """ Validates the individual parameter key and value. """ if key not in RESULT_ARG_TYPES: raise CloudantArgumentError(116, key) # pylint: disable=unidiomatic-typecheck # Validate argument values and ensure that a boolean is not passed in # if an integer is expected if (not isinstance(val, RESULT_ARG_TYPES[key]) or (type(val) is bool and int in RESULT_ARG_TYPES[key])): raise CloudantArgumentError(117, key, RESULT_ARG_TYPES[key]) if key == 'keys': for key_list_val in val: if (not isinstance(key_list_val, RESULT_ARG_TYPES['key']) or type(key_list_val) is bool): raise CloudantArgumentError(134, RESULT_ARG_TYPES['key']) if key == 'stale': if val not in ('ok', 'update_after'): raise CloudantArgumentError(135, val)
[ "def", "py_to_couch_validate", "(", "key", ",", "val", ")", ":", "if", "key", "not", "in", "RESULT_ARG_TYPES", ":", "raise", "CloudantArgumentError", "(", "116", ",", "key", ")", "# pylint: disable=unidiomatic-typecheck", "# Validate argument values and ensure that a bool...
Validates the individual parameter key and value.
[ "Validates", "the", "individual", "parameter", "key", "and", "value", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_common_util.py#L179-L198
train
206,433
cloudant/python-cloudant
src/cloudant/_common_util.py
_py_to_couch_translate
def _py_to_couch_translate(key, val):
    """
    Performs the conversion of the Python parameter value to its CouchDB
    equivalent.
    """
    try:
        # These keys (and None values) pass through untranslated.
        passthrough = ('keys', 'endkey_docid', 'startkey_docid', 'stale', 'update')
        if key in passthrough or val is None:
            return {key: val}
        return {key: TYPE_CONVERTERS.get(type(val))(val)}
    except Exception as ex:
        raise CloudantArgumentError(136, key, ex)
python
def _py_to_couch_translate(key, val): """ Performs the conversion of the Python parameter value to its CouchDB equivalent. """ try: if key in ['keys', 'endkey_docid', 'startkey_docid', 'stale', 'update']: return {key: val} if val is None: return {key: None} arg_converter = TYPE_CONVERTERS.get(type(val)) return {key: arg_converter(val)} except Exception as ex: raise CloudantArgumentError(136, key, ex)
[ "def", "_py_to_couch_translate", "(", "key", ",", "val", ")", ":", "try", ":", "if", "key", "in", "[", "'keys'", ",", "'endkey_docid'", ",", "'startkey_docid'", ",", "'stale'", ",", "'update'", "]", ":", "return", "{", "key", ":", "val", "}", "if", "va...
Performs the conversion of the Python parameter value to its CouchDB equivalent.
[ "Performs", "the", "conversion", "of", "the", "Python", "parameter", "value", "to", "its", "CouchDB", "equivalent", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_common_util.py#L200-L213
train
206,434
cloudant/python-cloudant
src/cloudant/_common_util.py
get_docs
def get_docs(r_session, url, encoder=None, headers=None, **params):
    """
    Provides a helper for functions that require GET or POST requests
    with a JSON, text, or raw response containing documents.

    :param r_session: Authentication session from the client
    :param str url: URL containing the endpoint
    :param JSONEncoder encoder: Custom encoder from the client
    :param dict headers: Optional HTTP Headers to send with the request

    :returns: Raw response content from the specified endpoint
    """
    keys_list = params.pop('keys', None)
    keys = None
    if keys_list is not None:
        keys = json.dumps({'keys': keys_list}, cls=encoder)
    f_params = python_to_couch(params)
    if keys is not None:
        # POSTing JSON keys, so the Content-Type header must say so.
        if headers is None:
            headers = {}
        headers['Content-Type'] = 'application/json'
        resp = r_session.post(url, headers=headers, params=f_params, data=keys)
    else:
        resp = r_session.get(url, headers=headers, params=f_params)
    resp.raise_for_status()
    return resp
python
def get_docs(r_session, url, encoder=None, headers=None, **params): """ Provides a helper for functions that require GET or POST requests with a JSON, text, or raw response containing documents. :param r_session: Authentication session from the client :param str url: URL containing the endpoint :param JSONEncoder encoder: Custom encoder from the client :param dict headers: Optional HTTP Headers to send with the request :returns: Raw response content from the specified endpoint """ keys_list = params.pop('keys', None) keys = None if keys_list is not None: keys = json.dumps({'keys': keys_list}, cls=encoder) f_params = python_to_couch(params) resp = None if keys is not None: # If we're using POST we are sending JSON so add the header if headers is None: headers = {} headers['Content-Type'] = 'application/json' resp = r_session.post(url, headers=headers, params=f_params, data=keys) else: resp = r_session.get(url, headers=headers, params=f_params) resp.raise_for_status() return resp
[ "def", "get_docs", "(", "r_session", ",", "url", ",", "encoder", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "params", ")", ":", "keys_list", "=", "params", ".", "pop", "(", "'keys'", ",", "None", ")", "keys", "=", "None", "if", "keys_...
Provides a helper for functions that require GET or POST requests with a JSON, text, or raw response containing documents. :param r_session: Authentication session from the client :param str url: URL containing the endpoint :param JSONEncoder encoder: Custom encoder from the client :param dict headers: Optional HTTP Headers to send with the request :returns: Raw response content from the specified endpoint
[ "Provides", "a", "helper", "for", "functions", "that", "require", "GET", "or", "POST", "requests", "with", "a", "JSON", "text", "or", "raw", "response", "containing", "documents", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_common_util.py#L232-L259
train
206,435
cloudant/python-cloudant
src/cloudant/_common_util.py
response_to_json_dict
def response_to_json_dict(response, **kwargs):
    """
    Standard place to convert responses to JSON.

    :param response: requests response object
    :param **kwargs: arguments accepted by json.loads

    :returns: dict of JSON response
    """
    # Default to UTF-8 when the response carries no charset information.
    if response.encoding is None:
        response.encoding = 'utf-8'
    body = response.text
    return json.loads(body, **kwargs)
python
def response_to_json_dict(response, **kwargs): """ Standard place to convert responses to JSON. :param response: requests response object :param **kwargs: arguments accepted by json.loads :returns: dict of JSON response """ if response.encoding is None: response.encoding = 'utf-8' return json.loads(response.text, **kwargs)
[ "def", "response_to_json_dict", "(", "response", ",", "*", "*", "kwargs", ")", ":", "if", "response", ".", "encoding", "is", "None", ":", "response", ".", "encoding", "=", "'utf-8'", "return", "json", ".", "loads", "(", "response", ".", "text", ",", "*",...
Standard place to convert responses to JSON. :param response: requests response object :param **kwargs: arguments accepted by json.loads :returns: dict of JSON response
[ "Standard", "place", "to", "convert", "responses", "to", "JSON", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/_common_util.py#L283-L294
train
206,436
cloudant/python-cloudant
src/cloudant/__init__.py
cloudant_iam
def cloudant_iam(account_name, api_key, **kwargs):
    """
    Provides a context manager to create a Cloudant session using IAM
    authentication and provide access to databases, docs etc.

    :param account_name: Cloudant account name.
    :param api_key: IAM authentication API key.

    For example:

    .. code-block:: python

        # cloudant context manager
        from cloudant import cloudant_iam

        with cloudant_iam(ACCOUNT_NAME, API_KEY) as client:
            # Context handles connect() and disconnect() for you.
            # Perform library operations within this context.  Such as:
            print client.all_dbs()
            # ...
    """
    cloudant_session = Cloudant.iam(account_name, api_key, **kwargs)
    cloudant_session.connect()
    try:
        yield cloudant_session
    finally:
        # Ensure the session is torn down even when the with-body raises;
        # without this the connection leaked on any exception.
        cloudant_session.disconnect()
python
def cloudant_iam(account_name, api_key, **kwargs): """ Provides a context manager to create a Cloudant session using IAM authentication and provide access to databases, docs etc. :param account_name: Cloudant account name. :param api_key: IAM authentication API key. For example: .. code-block:: python # cloudant context manager from cloudant import cloudant_iam with cloudant_iam(ACCOUNT_NAME, API_KEY) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ... """ cloudant_session = Cloudant.iam(account_name, api_key, **kwargs) cloudant_session.connect() yield cloudant_session cloudant_session.disconnect()
[ "def", "cloudant_iam", "(", "account_name", ",", "api_key", ",", "*", "*", "kwargs", ")", ":", "cloudant_session", "=", "Cloudant", ".", "iam", "(", "account_name", ",", "api_key", ",", "*", "*", "kwargs", ")", "cloudant_session", ".", "connect", "(", ")",...
Provides a context manager to create a Cloudant session using IAM authentication and provide access to databases, docs etc. :param account_name: Cloudant account name. :param api_key: IAM authentication API key. For example: .. code-block:: python # cloudant context manager from cloudant import cloudant_iam with cloudant_iam(ACCOUNT_NAME, API_KEY) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ...
[ "Provides", "a", "context", "manager", "to", "create", "a", "Cloudant", "session", "using", "IAM", "authentication", "and", "provide", "access", "to", "databases", "docs", "etc", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/__init__.py#L66-L92
train
206,437
cloudant/python-cloudant
src/cloudant/__init__.py
couchdb
def couchdb(user, passwd, **kwargs): """ Provides a context manager to create a CouchDB session and provide access to databases, docs etc. :param str user: Username used to connect to CouchDB. :param str passwd: Passcode used to connect to CouchDB. :param str url: URL for CouchDB server. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. For example: .. code-block:: python # couchdb context manager from cloudant import couchdb with couchdb(USERNAME, PASSWORD, url=COUCHDB_URL) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ... """ couchdb_session = CouchDB(user, passwd, **kwargs) couchdb_session.connect() yield couchdb_session couchdb_session.disconnect()
python
def couchdb(user, passwd, **kwargs): """ Provides a context manager to create a CouchDB session and provide access to databases, docs etc. :param str user: Username used to connect to CouchDB. :param str passwd: Passcode used to connect to CouchDB. :param str url: URL for CouchDB server. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. For example: .. code-block:: python # couchdb context manager from cloudant import couchdb with couchdb(USERNAME, PASSWORD, url=COUCHDB_URL) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ... """ couchdb_session = CouchDB(user, passwd, **kwargs) couchdb_session.connect() yield couchdb_session couchdb_session.disconnect()
[ "def", "couchdb", "(", "user", ",", "passwd", ",", "*", "*", "kwargs", ")", ":", "couchdb_session", "=", "CouchDB", "(", "user", ",", "passwd", ",", "*", "*", "kwargs", ")", "couchdb_session", ".", "connect", "(", ")", "yield", "couchdb_session", "couchd...
Provides a context manager to create a CouchDB session and provide access to databases, docs etc. :param str user: Username used to connect to CouchDB. :param str passwd: Passcode used to connect to CouchDB. :param str url: URL for CouchDB server. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. For example: .. code-block:: python # couchdb context manager from cloudant import couchdb with couchdb(USERNAME, PASSWORD, url=COUCHDB_URL) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ...
[ "Provides", "a", "context", "manager", "to", "create", "a", "CouchDB", "session", "and", "provide", "access", "to", "databases", "docs", "etc", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/__init__.py#L165-L192
train
206,438
cloudant/python-cloudant
src/cloudant/__init__.py
couchdb_admin_party
def couchdb_admin_party(**kwargs): """ Provides a context manager to create a CouchDB session in Admin Party mode and provide access to databases, docs etc. :param str url: URL for CouchDB server. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. For example: .. code-block:: python # couchdb_admin_party context manager from cloudant import couchdb_admin_party with couchdb_admin_party(url=COUCHDB_URL) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ... """ couchdb_session = CouchDB(None, None, True, **kwargs) couchdb_session.connect() yield couchdb_session couchdb_session.disconnect()
python
def couchdb_admin_party(**kwargs): """ Provides a context manager to create a CouchDB session in Admin Party mode and provide access to databases, docs etc. :param str url: URL for CouchDB server. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. For example: .. code-block:: python # couchdb_admin_party context manager from cloudant import couchdb_admin_party with couchdb_admin_party(url=COUCHDB_URL) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ... """ couchdb_session = CouchDB(None, None, True, **kwargs) couchdb_session.connect() yield couchdb_session couchdb_session.disconnect()
[ "def", "couchdb_admin_party", "(", "*", "*", "kwargs", ")", ":", "couchdb_session", "=", "CouchDB", "(", "None", ",", "None", ",", "True", ",", "*", "*", "kwargs", ")", "couchdb_session", ".", "connect", "(", ")", "yield", "couchdb_session", "couchdb_session...
Provides a context manager to create a CouchDB session in Admin Party mode and provide access to databases, docs etc. :param str url: URL for CouchDB server. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. For example: .. code-block:: python # couchdb_admin_party context manager from cloudant import couchdb_admin_party with couchdb_admin_party(url=COUCHDB_URL) as client: # Context handles connect() and disconnect() for you. # Perform library operations within this context. Such as: print client.all_dbs() # ...
[ "Provides", "a", "context", "manager", "to", "create", "a", "CouchDB", "session", "in", "Admin", "Party", "mode", "and", "provide", "access", "to", "databases", "docs", "etc", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/__init__.py#L195-L220
train
206,439
cloudant/python-cloudant
src/cloudant/result.py
Result._handle_result_by_index
def _handle_result_by_index(self, idx): """ Handle processing when the result argument provided is an integer. """ if idx < 0: return None opts = dict(self.options) skip = opts.pop('skip', 0) limit = opts.pop('limit', None) py_to_couch_validate('skip', skip) py_to_couch_validate('limit', limit) if limit is not None and idx >= limit: # Result is out of range return dict() return self._ref(skip=skip+idx, limit=1, **opts)
python
def _handle_result_by_index(self, idx): """ Handle processing when the result argument provided is an integer. """ if idx < 0: return None opts = dict(self.options) skip = opts.pop('skip', 0) limit = opts.pop('limit', None) py_to_couch_validate('skip', skip) py_to_couch_validate('limit', limit) if limit is not None and idx >= limit: # Result is out of range return dict() return self._ref(skip=skip+idx, limit=1, **opts)
[ "def", "_handle_result_by_index", "(", "self", ",", "idx", ")", ":", "if", "idx", "<", "0", ":", "return", "None", "opts", "=", "dict", "(", "self", ".", "options", ")", "skip", "=", "opts", ".", "pop", "(", "'skip'", ",", "0", ")", "limit", "=", ...
Handle processing when the result argument provided is an integer.
[ "Handle", "processing", "when", "the", "result", "argument", "provided", "is", "an", "integer", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/result.py#L236-L250
train
206,440
cloudant/python-cloudant
src/cloudant/result.py
Result._handle_result_by_key
def _handle_result_by_key(self, key): """ Handle processing when the result argument provided is a document key. """ invalid_options = ('key', 'keys', 'startkey', 'endkey') if any(x in invalid_options for x in self.options): raise ResultException(102, invalid_options, self.options) return self._ref(key=key, **self.options)
python
def _handle_result_by_key(self, key): """ Handle processing when the result argument provided is a document key. """ invalid_options = ('key', 'keys', 'startkey', 'endkey') if any(x in invalid_options for x in self.options): raise ResultException(102, invalid_options, self.options) return self._ref(key=key, **self.options)
[ "def", "_handle_result_by_key", "(", "self", ",", "key", ")", ":", "invalid_options", "=", "(", "'key'", ",", "'keys'", ",", "'startkey'", ",", "'endkey'", ")", "if", "any", "(", "x", "in", "invalid_options", "for", "x", "in", "self", ".", "options", ")"...
Handle processing when the result argument provided is a document key.
[ "Handle", "processing", "when", "the", "result", "argument", "provided", "is", "a", "document", "key", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/result.py#L252-L259
train
206,441
cloudant/python-cloudant
src/cloudant/result.py
Result._handle_result_by_idx_slice
def _handle_result_by_idx_slice(self, idx_slice): """ Handle processing when the result argument provided is an index slice. """ opts = dict(self.options) skip = opts.pop('skip', 0) limit = opts.pop('limit', None) py_to_couch_validate('skip', skip) py_to_couch_validate('limit', limit) start = idx_slice.start stop = idx_slice.stop data = None # start and stop cannot be None and both must be greater than 0 if all(i is not None and i >= 0 for i in [start, stop]) and start < stop: if limit is not None: if start >= limit: # Result is out of range return dict() if stop > limit: # Ensure that slice does not extend past original limit return self._ref(skip=skip+start, limit=limit-start, **opts) data = self._ref(skip=skip+start, limit=stop-start, **opts) elif start is not None and stop is None and start >= 0: if limit is not None: if start >= limit: # Result is out of range return dict() # Ensure that slice does not extend past original limit data = self._ref(skip=skip+start, limit=limit-start, **opts) else: data = self._ref(skip=skip+start, **opts) elif start is None and stop is not None and stop >= 0: if limit is not None and stop > limit: # Ensure that slice does not extend past original limit data = self._ref(skip=skip, limit=limit, **opts) else: data = self._ref(skip=skip, limit=stop, **opts) return data
python
def _handle_result_by_idx_slice(self, idx_slice): """ Handle processing when the result argument provided is an index slice. """ opts = dict(self.options) skip = opts.pop('skip', 0) limit = opts.pop('limit', None) py_to_couch_validate('skip', skip) py_to_couch_validate('limit', limit) start = idx_slice.start stop = idx_slice.stop data = None # start and stop cannot be None and both must be greater than 0 if all(i is not None and i >= 0 for i in [start, stop]) and start < stop: if limit is not None: if start >= limit: # Result is out of range return dict() if stop > limit: # Ensure that slice does not extend past original limit return self._ref(skip=skip+start, limit=limit-start, **opts) data = self._ref(skip=skip+start, limit=stop-start, **opts) elif start is not None and stop is None and start >= 0: if limit is not None: if start >= limit: # Result is out of range return dict() # Ensure that slice does not extend past original limit data = self._ref(skip=skip+start, limit=limit-start, **opts) else: data = self._ref(skip=skip+start, **opts) elif start is None and stop is not None and stop >= 0: if limit is not None and stop > limit: # Ensure that slice does not extend past original limit data = self._ref(skip=skip, limit=limit, **opts) else: data = self._ref(skip=skip, limit=stop, **opts) return data
[ "def", "_handle_result_by_idx_slice", "(", "self", ",", "idx_slice", ")", ":", "opts", "=", "dict", "(", "self", ".", "options", ")", "skip", "=", "opts", ".", "pop", "(", "'skip'", ",", "0", ")", "limit", "=", "opts", ".", "pop", "(", "'limit'", ","...
Handle processing when the result argument provided is an index slice.
[ "Handle", "processing", "when", "the", "result", "argument", "provided", "is", "an", "index", "slice", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/result.py#L261-L298
train
206,442
cloudant/python-cloudant
src/cloudant/result.py
Result._handle_result_by_key_slice
def _handle_result_by_key_slice(self, key_slice): """ Handle processing when the result argument provided is a key slice. """ invalid_options = ('key', 'keys', 'startkey', 'endkey') if any(x in invalid_options for x in self.options): raise ResultException(102, invalid_options, self.options) if isinstance(key_slice.start, ResultByKey): start = key_slice.start() else: start = key_slice.start if isinstance(key_slice.stop, ResultByKey): stop = key_slice.stop() else: stop = key_slice.stop if (start is not None and stop is not None and isinstance(start, type(stop))): data = self._ref(startkey=start, endkey=stop, **self.options) elif start is not None and stop is None: data = self._ref(startkey=start, **self.options) elif start is None and stop is not None: data = self._ref(endkey=stop, **self.options) else: data = None return data
python
def _handle_result_by_key_slice(self, key_slice): """ Handle processing when the result argument provided is a key slice. """ invalid_options = ('key', 'keys', 'startkey', 'endkey') if any(x in invalid_options for x in self.options): raise ResultException(102, invalid_options, self.options) if isinstance(key_slice.start, ResultByKey): start = key_slice.start() else: start = key_slice.start if isinstance(key_slice.stop, ResultByKey): stop = key_slice.stop() else: stop = key_slice.stop if (start is not None and stop is not None and isinstance(start, type(stop))): data = self._ref(startkey=start, endkey=stop, **self.options) elif start is not None and stop is None: data = self._ref(startkey=start, **self.options) elif start is None and stop is not None: data = self._ref(endkey=stop, **self.options) else: data = None return data
[ "def", "_handle_result_by_key_slice", "(", "self", ",", "key_slice", ")", ":", "invalid_options", "=", "(", "'key'", ",", "'keys'", ",", "'startkey'", ",", "'endkey'", ")", "if", "any", "(", "x", "in", "invalid_options", "for", "x", "in", "self", ".", "opt...
Handle processing when the result argument provided is a key slice.
[ "Handle", "processing", "when", "the", "result", "argument", "provided", "is", "a", "key", "slice", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/result.py#L300-L327
train
206,443
cloudant/python-cloudant
src/cloudant/result.py
Result._iterator
def _iterator(self, response): ''' Iterate through view data. ''' while True: result = deque(self._parse_data(response)) del response if result: doc_count = len(result) last = result.pop() while result: yield result.popleft() # We expect doc_count = self._page_size + 1 results, if # we have self._page_size or less it means we are on the # last page and need to return the last result. if doc_count < self._real_page_size: yield last break del result # if we are in a view, keys could be duplicate so we # need to start from the right docid if last['id']: response = self._call(startkey=last['key'], startkey_docid=last['id']) # reduce result keys are unique by definition else: response = self._call(startkey=last['key']) else: break
python
def _iterator(self, response): ''' Iterate through view data. ''' while True: result = deque(self._parse_data(response)) del response if result: doc_count = len(result) last = result.pop() while result: yield result.popleft() # We expect doc_count = self._page_size + 1 results, if # we have self._page_size or less it means we are on the # last page and need to return the last result. if doc_count < self._real_page_size: yield last break del result # if we are in a view, keys could be duplicate so we # need to start from the right docid if last['id']: response = self._call(startkey=last['key'], startkey_docid=last['id']) # reduce result keys are unique by definition else: response = self._call(startkey=last['key']) else: break
[ "def", "_iterator", "(", "self", ",", "response", ")", ":", "while", "True", ":", "result", "=", "deque", "(", "self", ".", "_parse_data", "(", "response", ")", ")", "del", "response", "if", "result", ":", "doc_count", "=", "len", "(", "result", ")", ...
Iterate through view data.
[ "Iterate", "through", "view", "data", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/result.py#L376-L408
train
206,444
cloudant/python-cloudant
src/cloudant/result.py
QueryResult._iterator
def _iterator(self, response): ''' Iterate through query data. ''' while True: result = self._parse_data(response) bookmark = response.get('bookmark') if result: for row in result: yield row del result if not bookmark: break response = self._call(bookmark=bookmark) else: break
python
def _iterator(self, response): ''' Iterate through query data. ''' while True: result = self._parse_data(response) bookmark = response.get('bookmark') if result: for row in result: yield row del result if not bookmark: break response = self._call(bookmark=bookmark) else: break
[ "def", "_iterator", "(", "self", ",", "response", ")", ":", "while", "True", ":", "result", "=", "self", ".", "_parse_data", "(", "response", ")", "bookmark", "=", "response", ".", "get", "(", "'bookmark'", ")", "if", "result", ":", "for", "row", "in",...
Iterate through query data.
[ "Iterate", "through", "query", "data", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/result.py#L563-L583
train
206,445
cloudant/python-cloudant
src/cloudant/view.py
View.url
def url(self): """ Constructs and returns the View URL. :returns: View URL """ if self._partition_key: base_url = self.design_doc.document_partition_url( self._partition_key) else: base_url = self.design_doc.document_url return '/'.join(( base_url, '_view', self.view_name ))
python
def url(self): """ Constructs and returns the View URL. :returns: View URL """ if self._partition_key: base_url = self.design_doc.document_partition_url( self._partition_key) else: base_url = self.design_doc.document_url return '/'.join(( base_url, '_view', self.view_name ))
[ "def", "url", "(", "self", ")", ":", "if", "self", ".", "_partition_key", ":", "base_url", "=", "self", ".", "design_doc", ".", "document_partition_url", "(", "self", ".", "_partition_key", ")", "else", ":", "base_url", "=", "self", ".", "design_doc", ".",...
Constructs and returns the View URL. :returns: View URL
[ "Constructs", "and", "returns", "the", "View", "URL", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/view.py#L168-L184
train
206,446
cloudant/python-cloudant
src/cloudant/index.py
Index.as_a_dict
def as_a_dict(self): """ Displays the index as a dictionary. This includes the design document id, index name, index type, and index definition. :returns: Dictionary representation of the index as a dictionary """ index_dict = { 'ddoc': self._ddoc_id, 'name': self._name, 'type': self._type, 'def': self._def } if self._partitioned: index_dict['partitioned'] = True return index_dict
python
def as_a_dict(self): """ Displays the index as a dictionary. This includes the design document id, index name, index type, and index definition. :returns: Dictionary representation of the index as a dictionary """ index_dict = { 'ddoc': self._ddoc_id, 'name': self._name, 'type': self._type, 'def': self._def } if self._partitioned: index_dict['partitioned'] = True return index_dict
[ "def", "as_a_dict", "(", "self", ")", ":", "index_dict", "=", "{", "'ddoc'", ":", "self", ".", "_ddoc_id", ",", "'name'", ":", "self", ".", "_name", ",", "'type'", ":", "self", ".", "_type", ",", "'def'", ":", "self", ".", "_def", "}", "if", "self"...
Displays the index as a dictionary. This includes the design document id, index name, index type, and index definition. :returns: Dictionary representation of the index as a dictionary
[ "Displays", "the", "index", "as", "a", "dictionary", ".", "This", "includes", "the", "design", "document", "id", "index", "name", "index", "type", "and", "index", "definition", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/index.py#L117-L134
train
206,447
cloudant/python-cloudant
src/cloudant/index.py
Index.create
def create(self): """ Creates the current index in the remote database. """ payload = {'type': self._type} if self._ddoc_id and self._ddoc_id != '': if isinstance(self._ddoc_id, STRTYPE): if self._ddoc_id.startswith('_design/'): payload['ddoc'] = self._ddoc_id[8:] else: payload['ddoc'] = self._ddoc_id else: raise CloudantArgumentError(122, self._ddoc_id) if self._name and self._name != '': if isinstance(self._name, STRTYPE): payload['name'] = self._name else: raise CloudantArgumentError(123, self._name) self._def_check() payload['index'] = self._def if self._partitioned: payload['partitioned'] = True headers = {'Content-Type': 'application/json'} resp = self._r_session.post( self.index_url, data=json.dumps(payload, cls=self._database.client.encoder), headers=headers ) resp.raise_for_status() self._ddoc_id = response_to_json_dict(resp)['id'] self._name = response_to_json_dict(resp)['name']
python
def create(self): """ Creates the current index in the remote database. """ payload = {'type': self._type} if self._ddoc_id and self._ddoc_id != '': if isinstance(self._ddoc_id, STRTYPE): if self._ddoc_id.startswith('_design/'): payload['ddoc'] = self._ddoc_id[8:] else: payload['ddoc'] = self._ddoc_id else: raise CloudantArgumentError(122, self._ddoc_id) if self._name and self._name != '': if isinstance(self._name, STRTYPE): payload['name'] = self._name else: raise CloudantArgumentError(123, self._name) self._def_check() payload['index'] = self._def if self._partitioned: payload['partitioned'] = True headers = {'Content-Type': 'application/json'} resp = self._r_session.post( self.index_url, data=json.dumps(payload, cls=self._database.client.encoder), headers=headers ) resp.raise_for_status() self._ddoc_id = response_to_json_dict(resp)['id'] self._name = response_to_json_dict(resp)['name']
[ "def", "create", "(", "self", ")", ":", "payload", "=", "{", "'type'", ":", "self", ".", "_type", "}", "if", "self", ".", "_ddoc_id", "and", "self", ".", "_ddoc_id", "!=", "''", ":", "if", "isinstance", "(", "self", ".", "_ddoc_id", ",", "STRTYPE", ...
Creates the current index in the remote database.
[ "Creates", "the", "current", "index", "in", "the", "remote", "database", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/index.py#L136-L168
train
206,448
cloudant/python-cloudant
src/cloudant/index.py
Index.delete
def delete(self): """ Removes the current index from the remote database. """ if not self._ddoc_id: raise CloudantArgumentError(125) if not self._name: raise CloudantArgumentError(126) ddoc_id = self._ddoc_id if ddoc_id.startswith('_design/'): ddoc_id = ddoc_id[8:] url = '/'.join((self.index_url, ddoc_id, self._type, self._name)) resp = self._r_session.delete(url) resp.raise_for_status()
python
def delete(self): """ Removes the current index from the remote database. """ if not self._ddoc_id: raise CloudantArgumentError(125) if not self._name: raise CloudantArgumentError(126) ddoc_id = self._ddoc_id if ddoc_id.startswith('_design/'): ddoc_id = ddoc_id[8:] url = '/'.join((self.index_url, ddoc_id, self._type, self._name)) resp = self._r_session.delete(url) resp.raise_for_status()
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "_ddoc_id", ":", "raise", "CloudantArgumentError", "(", "125", ")", "if", "not", "self", ".", "_name", ":", "raise", "CloudantArgumentError", "(", "126", ")", "ddoc_id", "=", "self", ".", ...
Removes the current index from the remote database.
[ "Removes", "the", "current", "index", "from", "the", "remote", "database", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/index.py#L177-L190
train
206,449
cloudant/python-cloudant
src/cloudant/index.py
TextIndex._def_check
def _def_check(self): """ Checks that the definition provided contains only valid arguments for a text index. """ if self._def != dict(): for key, val in iteritems_(self._def): if key not in list(TEXT_INDEX_ARGS.keys()): raise CloudantArgumentError(127, key) if not isinstance(val, TEXT_INDEX_ARGS[key]): raise CloudantArgumentError(128, key, TEXT_INDEX_ARGS[key])
python
def _def_check(self): """ Checks that the definition provided contains only valid arguments for a text index. """ if self._def != dict(): for key, val in iteritems_(self._def): if key not in list(TEXT_INDEX_ARGS.keys()): raise CloudantArgumentError(127, key) if not isinstance(val, TEXT_INDEX_ARGS[key]): raise CloudantArgumentError(128, key, TEXT_INDEX_ARGS[key])
[ "def", "_def_check", "(", "self", ")", ":", "if", "self", ".", "_def", "!=", "dict", "(", ")", ":", "for", "key", ",", "val", "in", "iteritems_", "(", "self", ".", "_def", ")", ":", "if", "key", "not", "in", "list", "(", "TEXT_INDEX_ARGS", ".", "...
Checks that the definition provided contains only valid arguments for a text index.
[ "Checks", "that", "the", "definition", "provided", "contains", "only", "valid", "arguments", "for", "a", "text", "index", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/index.py#L219-L229
train
206,450
cloudant/python-cloudant
src/cloudant/security_document.py
SecurityDocument.fetch
def fetch(self): """ Retrieves the content of the current security document from the remote database and populates the locally cached SecurityDocument object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached SecurityDocument object. """ resp = self.r_session.get(self.document_url) resp.raise_for_status() self.clear() self.update(response_to_json_dict(resp))
python
def fetch(self): """ Retrieves the content of the current security document from the remote database and populates the locally cached SecurityDocument object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached SecurityDocument object. """ resp = self.r_session.get(self.document_url) resp.raise_for_status() self.clear() self.update(response_to_json_dict(resp))
[ "def", "fetch", "(", "self", ")", ":", "resp", "=", "self", ".", "r_session", ".", "get", "(", "self", ".", "document_url", ")", "resp", ".", "raise_for_status", "(", ")", "self", ".", "clear", "(", ")", "self", ".", "update", "(", "response_to_json_di...
Retrieves the content of the current security document from the remote database and populates the locally cached SecurityDocument object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached SecurityDocument object.
[ "Retrieves", "the", "content", "of", "the", "current", "security", "document", "from", "the", "remote", "database", "and", "populates", "the", "locally", "cached", "SecurityDocument", "object", "with", "that", "content", ".", "A", "call", "to", "fetch", "will", ...
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/security_document.py#L95-L105
train
206,451
cloudant/python-cloudant
src/cloudant/security_document.py
SecurityDocument.save
def save(self): """ Saves changes made to the locally cached SecurityDocument object's data structures to the remote database. """ resp = self.r_session.put( self.document_url, data=self.json(), headers={'Content-Type': 'application/json'} ) resp.raise_for_status()
python
def save(self): """ Saves changes made to the locally cached SecurityDocument object's data structures to the remote database. """ resp = self.r_session.put( self.document_url, data=self.json(), headers={'Content-Type': 'application/json'} ) resp.raise_for_status()
[ "def", "save", "(", "self", ")", ":", "resp", "=", "self", ".", "r_session", ".", "put", "(", "self", ".", "document_url", ",", "data", "=", "self", ".", "json", "(", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")...
Saves changes made to the locally cached SecurityDocument object's data structures to the remote database.
[ "Saves", "changes", "made", "to", "the", "locally", "cached", "SecurityDocument", "object", "s", "data", "structures", "to", "the", "remote", "database", "." ]
e0ba190f6ba07fe3522a668747128214ad573c7e
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/security_document.py#L107-L117
train
206,452
antocuni/pdb
pdb.py
xpm
def xpm(Pdb=Pdb): """ To be used inside an except clause, enter a post-mortem pdb related to the just catched exception. """ info = sys.exc_info() print(traceback.format_exc()) post_mortem(info[2], Pdb)
python
def xpm(Pdb=Pdb): """ To be used inside an except clause, enter a post-mortem pdb related to the just catched exception. """ info = sys.exc_info() print(traceback.format_exc()) post_mortem(info[2], Pdb)
[ "def", "xpm", "(", "Pdb", "=", "Pdb", ")", ":", "info", "=", "sys", ".", "exc_info", "(", ")", "print", "(", "traceback", ".", "format_exc", "(", ")", ")", "post_mortem", "(", "info", "[", "2", "]", ",", "Pdb", ")" ]
To be used inside an except clause, enter a post-mortem pdb related to the just catched exception.
[ "To", "be", "used", "inside", "an", "except", "clause", "enter", "a", "post", "-", "mortem", "pdb", "related", "to", "the", "just", "catched", "exception", "." ]
a88be00d31f1ff38e26711a1d99589d830524c9e
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L1338-L1345
train
206,453
antocuni/pdb
pdb.py
Pdb.complete
def complete(self, text, state): """Handle completions from fancycompleter and original pdb.""" if state == 0: local._pdbpp_completing = True mydict = self.curframe.f_globals.copy() mydict.update(self.curframe_locals) completer = Completer(mydict) self._completions = self._get_all_completions( completer.complete, text) real_pdb = super(Pdb, self) for x in self._get_all_completions(real_pdb.complete, text): if x not in self._completions: self._completions.append(x) self._filter_completions(text) del local._pdbpp_completing # Remove "\t" from fancycompleter if there are pdb completions. if len(self._completions) > 1 and self._completions[0] == "\t": self._completions.pop(0) try: return self._completions[state] except IndexError: return None
python
def complete(self, text, state): """Handle completions from fancycompleter and original pdb.""" if state == 0: local._pdbpp_completing = True mydict = self.curframe.f_globals.copy() mydict.update(self.curframe_locals) completer = Completer(mydict) self._completions = self._get_all_completions( completer.complete, text) real_pdb = super(Pdb, self) for x in self._get_all_completions(real_pdb.complete, text): if x not in self._completions: self._completions.append(x) self._filter_completions(text) del local._pdbpp_completing # Remove "\t" from fancycompleter if there are pdb completions. if len(self._completions) > 1 and self._completions[0] == "\t": self._completions.pop(0) try: return self._completions[state] except IndexError: return None
[ "def", "complete", "(", "self", ",", "text", ",", "state", ")", ":", "if", "state", "==", "0", ":", "local", ".", "_pdbpp_completing", "=", "True", "mydict", "=", "self", ".", "curframe", ".", "f_globals", ".", "copy", "(", ")", "mydict", ".", "updat...
Handle completions from fancycompleter and original pdb.
[ "Handle", "completions", "from", "fancycompleter", "and", "original", "pdb", "." ]
a88be00d31f1ff38e26711a1d99589d830524c9e
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L356-L382
train
206,454
antocuni/pdb
pdb.py
Pdb.do_edit
def do_edit(self, arg): "Open an editor visiting the current file at the current line" if arg == '': filename, lineno = self._get_current_position() else: filename, lineno, _ = self._get_position_of_arg(arg) if filename is None: return # this case handles code generated with py.code.Source() # filename is something like '<0-codegen foo.py:18>' match = re.match(r'.*<\d+-codegen (.*):(\d+)>', filename) if match: filename = match.group(1) lineno = int(match.group(2)) try: self._open_editor(self._get_editor_cmd(filename, lineno)) except Exception as exc: self.error(exc)
python
def do_edit(self, arg): "Open an editor visiting the current file at the current line" if arg == '': filename, lineno = self._get_current_position() else: filename, lineno, _ = self._get_position_of_arg(arg) if filename is None: return # this case handles code generated with py.code.Source() # filename is something like '<0-codegen foo.py:18>' match = re.match(r'.*<\d+-codegen (.*):(\d+)>', filename) if match: filename = match.group(1) lineno = int(match.group(2)) try: self._open_editor(self._get_editor_cmd(filename, lineno)) except Exception as exc: self.error(exc)
[ "def", "do_edit", "(", "self", ",", "arg", ")", ":", "if", "arg", "==", "''", ":", "filename", ",", "lineno", "=", "self", ".", "_get_current_position", "(", ")", "else", ":", "filename", ",", "lineno", ",", "_", "=", "self", ".", "_get_position_of_arg...
Open an editor visiting the current file at the current line
[ "Open", "an", "editor", "visiting", "the", "current", "file", "at", "the", "current", "line" ]
a88be00d31f1ff38e26711a1d99589d830524c9e
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L1142-L1160
train
206,455
antocuni/pdb
pdb.py
Pdb.set_trace
def set_trace(self, frame=None): """Remember starting frame. This is used with pytest, which does not use pdb.set_trace(). """ if hasattr(local, '_pdbpp_completing'): # Handle set_trace being called during completion, e.g. with # fancycompleter's attr_matches. return if frame is None: frame = sys._getframe().f_back self._via_set_trace_frame = frame return super(Pdb, self).set_trace(frame)
python
def set_trace(self, frame=None): """Remember starting frame. This is used with pytest, which does not use pdb.set_trace(). """ if hasattr(local, '_pdbpp_completing'): # Handle set_trace being called during completion, e.g. with # fancycompleter's attr_matches. return if frame is None: frame = sys._getframe().f_back self._via_set_trace_frame = frame return super(Pdb, self).set_trace(frame)
[ "def", "set_trace", "(", "self", ",", "frame", "=", "None", ")", ":", "if", "hasattr", "(", "local", ",", "'_pdbpp_completing'", ")", ":", "# Handle set_trace being called during completion, e.g. with", "# fancycompleter's attr_matches.", "return", "if", "frame", "is", ...
Remember starting frame. This is used with pytest, which does not use pdb.set_trace().
[ "Remember", "starting", "frame", "." ]
a88be00d31f1ff38e26711a1d99589d830524c9e
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L1204-L1216
train
206,456
antocuni/pdb
pdb.py
Pdb._remove_bdb_context
def _remove_bdb_context(evalue): """Remove exception context from Pdb from the exception. E.g. "AttributeError: 'Pdb' object has no attribute 'do_foo'", when trying to look up commands (bpo-36494). """ removed_bdb_context = evalue while removed_bdb_context.__context__: ctx = removed_bdb_context.__context__ if ( isinstance(ctx, AttributeError) and ctx.__traceback__.tb_frame.f_code.co_name == "onecmd" ): removed_bdb_context.__context__ = None break removed_bdb_context = removed_bdb_context.__context__
python
def _remove_bdb_context(evalue): """Remove exception context from Pdb from the exception. E.g. "AttributeError: 'Pdb' object has no attribute 'do_foo'", when trying to look up commands (bpo-36494). """ removed_bdb_context = evalue while removed_bdb_context.__context__: ctx = removed_bdb_context.__context__ if ( isinstance(ctx, AttributeError) and ctx.__traceback__.tb_frame.f_code.co_name == "onecmd" ): removed_bdb_context.__context__ = None break removed_bdb_context = removed_bdb_context.__context__
[ "def", "_remove_bdb_context", "(", "evalue", ")", ":", "removed_bdb_context", "=", "evalue", "while", "removed_bdb_context", ".", "__context__", ":", "ctx", "=", "removed_bdb_context", ".", "__context__", "if", "(", "isinstance", "(", "ctx", ",", "AttributeError", ...
Remove exception context from Pdb from the exception. E.g. "AttributeError: 'Pdb' object has no attribute 'do_foo'", when trying to look up commands (bpo-36494).
[ "Remove", "exception", "context", "from", "Pdb", "from", "the", "exception", "." ]
a88be00d31f1ff38e26711a1d99589d830524c9e
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L1258-L1273
train
206,457
ottogroup/palladium
palladium/util.py
args_from_config
def args_from_config(func): """Decorator that injects parameters from the configuration. """ func_args = signature(func).parameters @wraps(func) def wrapper(*args, **kwargs): config = get_config() for i, argname in enumerate(func_args): if len(args) > i or argname in kwargs: continue elif argname in config: kwargs[argname] = config[argname] try: getcallargs(func, *args, **kwargs) except TypeError as exc: msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR) exc.args = (msg,) raise exc return func(*args, **kwargs) wrapper.__wrapped__ = func return wrapper
python
def args_from_config(func): """Decorator that injects parameters from the configuration. """ func_args = signature(func).parameters @wraps(func) def wrapper(*args, **kwargs): config = get_config() for i, argname in enumerate(func_args): if len(args) > i or argname in kwargs: continue elif argname in config: kwargs[argname] = config[argname] try: getcallargs(func, *args, **kwargs) except TypeError as exc: msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR) exc.args = (msg,) raise exc return func(*args, **kwargs) wrapper.__wrapped__ = func return wrapper
[ "def", "args_from_config", "(", "func", ")", ":", "func_args", "=", "signature", "(", "func", ")", ".", "parameters", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "config", "=", "get_config", ...
Decorator that injects parameters from the configuration.
[ "Decorator", "that", "injects", "parameters", "from", "the", "configuration", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L62-L84
train
206,458
ottogroup/palladium
palladium/util.py
memory_usage_psutil
def memory_usage_psutil(): """Return the current process memory usage in MB. """ process = psutil.Process(os.getpid()) mem = process.memory_info()[0] / float(2 ** 20) mem_vms = process.memory_info()[1] / float(2 ** 20) return mem, mem_vms
python
def memory_usage_psutil(): """Return the current process memory usage in MB. """ process = psutil.Process(os.getpid()) mem = process.memory_info()[0] / float(2 ** 20) mem_vms = process.memory_info()[1] / float(2 ** 20) return mem, mem_vms
[ "def", "memory_usage_psutil", "(", ")", ":", "process", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", "mem", "=", "process", ".", "memory_info", "(", ")", "[", "0", "]", "/", "float", "(", "2", "**", "20", ")", "mem_vms", ...
Return the current process memory usage in MB.
[ "Return", "the", "current", "process", "memory", "usage", "in", "MB", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L196-L202
train
206,459
ottogroup/palladium
palladium/util.py
version_cmd
def version_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Print the version number of Palladium. Usage: pld-version [options] Options: -h --help Show this screen. """ docopt(version_cmd.__doc__, argv=argv) print(__version__)
python
def version_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Print the version number of Palladium. Usage: pld-version [options] Options: -h --help Show this screen. """ docopt(version_cmd.__doc__, argv=argv) print(__version__)
[ "def", "version_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "docopt", "(", "version_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "print", "(", "__version__", ")" ]
\ Print the version number of Palladium. Usage: pld-version [options] Options: -h --help Show this screen.
[ "\\", "Print", "the", "version", "number", "of", "Palladium", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L205-L216
train
206,460
ottogroup/palladium
palladium/util.py
upgrade_cmd
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Upgrade the database to the latest version. Usage: pld-ugprade [options] Options: --from=<v> Upgrade from a specific version, overriding the version stored in the database. --to=<v> Upgrade to a specific version instead of the latest version. -h --help Show this screen. """ arguments = docopt(upgrade_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
python
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Upgrade the database to the latest version. Usage: pld-ugprade [options] Options: --from=<v> Upgrade from a specific version, overriding the version stored in the database. --to=<v> Upgrade to a specific version instead of the latest version. -h --help Show this screen. """ arguments = docopt(upgrade_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
[ "def", "upgrade_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "arguments", "=", "docopt", "(", "upgrade_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "initialize_config", "(", "__mode__", "=", "'fi...
\ Upgrade the database to the latest version. Usage: pld-ugprade [options] Options: --from=<v> Upgrade from a specific version, overriding the version stored in the database. --to=<v> Upgrade to a specific version instead of the latest version. -h --help Show this screen.
[ "\\", "Upgrade", "the", "database", "to", "the", "latest", "version", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L227-L245
train
206,461
ottogroup/palladium
palladium/util.py
export_cmd
def export_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Export a model from one model persister to another. The model persister to export to is supposed to be available in the configuration file under the 'model_persister_export' key. Usage: pld-export [options] Options: --version=<v> Export a specific version rather than the active one. --no-activate Don't activate the exported model with the 'model_persister_export'. -h --help Show this screen. """ arguments = docopt(export_cmd.__doc__, argv=argv) model_version = export( model_version=arguments['--version'], activate=not arguments['--no-activate'], ) logger.info("Exported model. New version number: {}".format(model_version))
python
def export_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Export a model from one model persister to another. The model persister to export to is supposed to be available in the configuration file under the 'model_persister_export' key. Usage: pld-export [options] Options: --version=<v> Export a specific version rather than the active one. --no-activate Don't activate the exported model with the 'model_persister_export'. -h --help Show this screen. """ arguments = docopt(export_cmd.__doc__, argv=argv) model_version = export( model_version=arguments['--version'], activate=not arguments['--no-activate'], ) logger.info("Exported model. New version number: {}".format(model_version))
[ "def", "export_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "arguments", "=", "docopt", "(", "export_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "model_version", "=", "export", "(", "model_versi...
\ Export a model from one model persister to another. The model persister to export to is supposed to be available in the configuration file under the 'model_persister_export' key. Usage: pld-export [options] Options: --version=<v> Export a specific version rather than the active one. --no-activate Don't activate the exported model with the 'model_persister_export'. -h --help Show this screen.
[ "\\", "Export", "a", "model", "from", "one", "model", "persister", "to", "another", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L262-L286
train
206,462
ottogroup/palladium
palladium/util.py
Partial
def Partial(func, **kwargs): """Allows the use of partially applied functions in the configuration. """ if isinstance(func, str): func = resolve_dotted_name(func) partial_func = partial(func, **kwargs) update_wrapper(partial_func, func) return partial_func
python
def Partial(func, **kwargs): """Allows the use of partially applied functions in the configuration. """ if isinstance(func, str): func = resolve_dotted_name(func) partial_func = partial(func, **kwargs) update_wrapper(partial_func, func) return partial_func
[ "def", "Partial", "(", "func", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "func", ",", "str", ")", ":", "func", "=", "resolve_dotted_name", "(", "func", ")", "partial_func", "=", "partial", "(", "func", ",", "*", "*", "kwargs", ")", ...
Allows the use of partially applied functions in the configuration.
[ "Allows", "the", "use", "of", "partially", "applied", "functions", "in", "the", "configuration", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/util.py#L333-L341
train
206,463
ottogroup/palladium
palladium/server.py
create_predict_function
def create_predict_function( route, predict_service, decorator_list_name, config): """Creates a predict function and registers it to the Flask app using the route decorator. :param str route: Path of the entry point. :param palladium.interfaces.PredictService predict_service: The predict service to be registered to this entry point. :param str decorator_list_name: The decorator list to be used for this predict service. It is OK if there is no such entry in the active Palladium config. :return: A predict service function that will be used to process predict requests. """ model_persister = config.get('model_persister') @app.route(route, methods=['GET', 'POST'], endpoint=route) @PluggableDecorator(decorator_list_name) def predict_func(): return predict(model_persister, predict_service) return predict_func
python
def create_predict_function( route, predict_service, decorator_list_name, config): """Creates a predict function and registers it to the Flask app using the route decorator. :param str route: Path of the entry point. :param palladium.interfaces.PredictService predict_service: The predict service to be registered to this entry point. :param str decorator_list_name: The decorator list to be used for this predict service. It is OK if there is no such entry in the active Palladium config. :return: A predict service function that will be used to process predict requests. """ model_persister = config.get('model_persister') @app.route(route, methods=['GET', 'POST'], endpoint=route) @PluggableDecorator(decorator_list_name) def predict_func(): return predict(model_persister, predict_service) return predict_func
[ "def", "create_predict_function", "(", "route", ",", "predict_service", ",", "decorator_list_name", ",", "config", ")", ":", "model_persister", "=", "config", ".", "get", "(", "'model_persister'", ")", "@", "app", ".", "route", "(", "route", ",", "methods", "=...
Creates a predict function and registers it to the Flask app using the route decorator. :param str route: Path of the entry point. :param palladium.interfaces.PredictService predict_service: The predict service to be registered to this entry point. :param str decorator_list_name: The decorator list to be used for this predict service. It is OK if there is no such entry in the active Palladium config. :return: A predict service function that will be used to process predict requests.
[ "Creates", "a", "predict", "function", "and", "registers", "it", "to", "the", "Flask", "app", "using", "the", "route", "decorator", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L270-L296
train
206,464
ottogroup/palladium
palladium/server.py
devserver_cmd
def devserver_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Serve the web API for development. Usage: pld-devserver [options] Options: -h --help Show this screen. --host=<host> The host to use [default: 0.0.0.0]. --port=<port> The port to use [default: 5000]. --debug=<debug> Whether or not to use debug mode [default: 0]. """ arguments = docopt(devserver_cmd.__doc__, argv=argv) initialize_config() app.run( host=arguments['--host'], port=int(arguments['--port']), debug=int(arguments['--debug']), )
python
def devserver_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Serve the web API for development. Usage: pld-devserver [options] Options: -h --help Show this screen. --host=<host> The host to use [default: 0.0.0.0]. --port=<port> The port to use [default: 5000]. --debug=<debug> Whether or not to use debug mode [default: 0]. """ arguments = docopt(devserver_cmd.__doc__, argv=argv) initialize_config() app.run( host=arguments['--host'], port=int(arguments['--port']), debug=int(arguments['--debug']), )
[ "def", "devserver_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "arguments", "=", "docopt", "(", "devserver_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "initialize_config", "(", ")", "app", ".", ...
\ Serve the web API for development. Usage: pld-devserver [options] Options: -h --help Show this screen. --host=<host> The host to use [default: 0.0.0.0]. --port=<port> The port to use [default: 5000]. --debug=<debug> Whether or not to use debug mode [default: 0].
[ "\\", "Serve", "the", "web", "API", "for", "development", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L299-L321
train
206,465
ottogroup/palladium
palladium/server.py
stream_cmd
def stream_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Start the streaming server, which listens to stdin, processes line by line, and returns predictions. The input should consist of a list of json objects, where each object will result in a prediction. Each line is processed in a batch. Example input (must be on a single line): [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7, "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0, "petal length": 1.4, "petal width": 5}] Example output: ["Iris-virginica","Iris-setosa"] An input line with the word 'exit' will quit the streaming server. Usage: pld-stream [options] Options: -h --help Show this screen. """ docopt(stream_cmd.__doc__, argv=argv) initialize_config() stream = PredictStream() stream.listen(sys.stdin, sys.stdout, sys.stderr)
python
def stream_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Start the streaming server, which listens to stdin, processes line by line, and returns predictions. The input should consist of a list of json objects, where each object will result in a prediction. Each line is processed in a batch. Example input (must be on a single line): [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7, "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0, "petal length": 1.4, "petal width": 5}] Example output: ["Iris-virginica","Iris-setosa"] An input line with the word 'exit' will quit the streaming server. Usage: pld-stream [options] Options: -h --help Show this screen. """ docopt(stream_cmd.__doc__, argv=argv) initialize_config() stream = PredictStream() stream.listen(sys.stdin, sys.stdout, sys.stderr)
[ "def", "stream_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "docopt", "(", "stream_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "initialize_config", "(", ")", "stream", "=", "PredictStream", "(",...
\ Start the streaming server, which listens to stdin, processes line by line, and returns predictions. The input should consist of a list of json objects, where each object will result in a prediction. Each line is processed in a batch. Example input (must be on a single line): [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7, "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0, "petal length": 1.4, "petal width": 5}] Example output: ["Iris-virginica","Iris-setosa"] An input line with the word 'exit' will quit the streaming server. Usage: pld-stream [options] Options: -h --help Show this screen.
[ "\\", "Start", "the", "streaming", "server", "which", "listens", "to", "stdin", "processes", "line", "by", "line", "and", "returns", "predictions", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L362-L391
train
206,466
ottogroup/palladium
palladium/server.py
PredictStream.listen
def listen(self, io_in, io_out, io_err): """Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used. """ for line in io_in: if line.strip().lower() == 'exit': break try: y_pred = self.process_line(line) except Exception as e: io_out.write('[]\n') io_err.write( "Error while processing input row: {}" "{}: {}\n".format(line, type(e), e)) io_err.flush() else: io_out.write(ujson.dumps(y_pred.tolist())) io_out.write('\n') io_out.flush()
python
def listen(self, io_in, io_out, io_err): """Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used. """ for line in io_in: if line.strip().lower() == 'exit': break try: y_pred = self.process_line(line) except Exception as e: io_out.write('[]\n') io_err.write( "Error while processing input row: {}" "{}: {}\n".format(line, type(e), e)) io_err.flush() else: io_out.write(ujson.dumps(y_pred.tolist())) io_out.write('\n') io_out.flush()
[ "def", "listen", "(", "self", ",", "io_in", ",", "io_out", ",", "io_err", ")", ":", "for", "line", "in", "io_in", ":", "if", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'exit'", ":", "break", "try", ":", "y_pred", "=", "self", ...
Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used.
[ "Listens", "to", "provided", "io", "stream", "and", "writes", "predictions", "to", "output", ".", "In", "case", "of", "errors", "the", "error", "stream", "will", "be", "used", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L340-L359
train
206,467
ottogroup/palladium
palladium/eval.py
list_cmd
def list_cmd(argv=sys.argv[1:]): # pragma: no cover """\ List information about available models. Uses the 'model_persister' from the configuration to display a list of models and their metadata. Usage: pld-list [options] Options: -h --help Show this screen. """ docopt(list_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') list()
python
def list_cmd(argv=sys.argv[1:]): # pragma: no cover """\ List information about available models. Uses the 'model_persister' from the configuration to display a list of models and their metadata. Usage: pld-list [options] Options: -h --help Show this screen. """ docopt(list_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') list()
[ "def", "list_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "docopt", "(", "list_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "initialize_config", "(", "__mode__", "=", "'fit'", ")", "list", "(",...
\ List information about available models. Uses the 'model_persister' from the configuration to display a list of models and their metadata. Usage: pld-list [options] Options: -h --help Show this screen.
[ "\\", "List", "information", "about", "available", "models", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/eval.py#L82-L97
train
206,468
ottogroup/palladium
palladium/fit.py
fit_cmd
def fit_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Fit a model and save to database. Will use 'dataset_loader_train', 'model', and 'model_perister' from the configuration file, to load a dataset to train a model with, and persist it. Usage: pld-fit [options] Options: -n --no-save Don't persist the fitted model to disk. --no-activate Don't activate the fitted model. --save-if-better-than=<k> Persist only if test score better than given value. -e --evaluate Evaluate fitted model on train and test set and print out results. -h --help Show this screen. """ arguments = docopt(fit_cmd.__doc__, argv=argv) no_save = arguments['--no-save'] no_activate = arguments['--no-activate'] save_if_better_than = arguments['--save-if-better-than'] evaluate = arguments['--evaluate'] or bool(save_if_better_than) if save_if_better_than is not None: save_if_better_than = float(save_if_better_than) initialize_config(__mode__='fit') fit( persist=not no_save, activate=not no_activate, evaluate=evaluate, persist_if_better_than=save_if_better_than, )
python
def fit_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Fit a model and save to database. Will use 'dataset_loader_train', 'model', and 'model_perister' from the configuration file, to load a dataset to train a model with, and persist it. Usage: pld-fit [options] Options: -n --no-save Don't persist the fitted model to disk. --no-activate Don't activate the fitted model. --save-if-better-than=<k> Persist only if test score better than given value. -e --evaluate Evaluate fitted model on train and test set and print out results. -h --help Show this screen. """ arguments = docopt(fit_cmd.__doc__, argv=argv) no_save = arguments['--no-save'] no_activate = arguments['--no-activate'] save_if_better_than = arguments['--save-if-better-than'] evaluate = arguments['--evaluate'] or bool(save_if_better_than) if save_if_better_than is not None: save_if_better_than = float(save_if_better_than) initialize_config(__mode__='fit') fit( persist=not no_save, activate=not no_activate, evaluate=evaluate, persist_if_better_than=save_if_better_than, )
[ "def", "fit_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "arguments", "=", "docopt", "(", "fit_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "no_save", "=", "arguments", "[", "'--no-save'", "]",...
\ Fit a model and save to database. Will use 'dataset_loader_train', 'model', and 'model_perister' from the configuration file, to load a dataset to train a model with, and persist it. Usage: pld-fit [options] Options: -n --no-save Don't persist the fitted model to disk. --no-activate Don't activate the fitted model. --save-if-better-than=<k> Persist only if test score better than given value. -e --evaluate Evaluate fitted model on train and test set and print out results. -h --help Show this screen.
[ "\\", "Fit", "a", "model", "and", "save", "to", "database", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/fit.py#L96-L133
train
206,469
ottogroup/palladium
palladium/fit.py
admin_cmd
def admin_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Activate or delete models. Models are usually made active right after fitting (see command pld-fit). The 'activate' command allows you to explicitly set the currently active model. Use 'pld-list' to get an overview of all available models along with their version identifiers. Deleting a model will simply remove it from the database. Usage: pld-admin activate <version> [options] pld-admin delete <version> [options] Options: -h --help Show this screen. """ arguments = docopt(admin_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') if arguments['activate']: activate(model_version=int(arguments['<version>'])) elif arguments['delete']: delete(model_version=int(arguments['<version>']))
python
def admin_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Activate or delete models. Models are usually made active right after fitting (see command pld-fit). The 'activate' command allows you to explicitly set the currently active model. Use 'pld-list' to get an overview of all available models along with their version identifiers. Deleting a model will simply remove it from the database. Usage: pld-admin activate <version> [options] pld-admin delete <version> [options] Options: -h --help Show this screen. """ arguments = docopt(admin_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') if arguments['activate']: activate(model_version=int(arguments['<version>'])) elif arguments['delete']: delete(model_version=int(arguments['<version>']))
[ "def", "admin_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "arguments", "=", "docopt", "(", "admin_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "initialize_config", "(", "__mode__", "=", "'fit'",...
\ Activate or delete models. Models are usually made active right after fitting (see command pld-fit). The 'activate' command allows you to explicitly set the currently active model. Use 'pld-list' to get an overview of all available models along with their version identifiers. Deleting a model will simply remove it from the database. Usage: pld-admin activate <version> [options] pld-admin delete <version> [options] Options: -h --help Show this screen.
[ "\\", "Activate", "or", "delete", "models", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/fit.py#L148-L171
train
206,470
ottogroup/palladium
palladium/fit.py
grid_search_cmd
def grid_search_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Grid search parameters for the model. Uses 'dataset_loader_train', 'model', and 'grid_search' from the configuration to load a training dataset, and run a grid search on the model using the grid of hyperparameters. Usage: pld-grid-search [options] Options: --save-results=<fname> Save results to CSV file --persist-best Persist the best model from grid search -h --help Show this screen. """ arguments = docopt(grid_search_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') grid_search( save_results=arguments['--save-results'], persist_best=arguments['--persist-best'], )
python
def grid_search_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Grid search parameters for the model. Uses 'dataset_loader_train', 'model', and 'grid_search' from the configuration to load a training dataset, and run a grid search on the model using the grid of hyperparameters. Usage: pld-grid-search [options] Options: --save-results=<fname> Save results to CSV file --persist-best Persist the best model from grid search -h --help Show this screen. """ arguments = docopt(grid_search_cmd.__doc__, argv=argv) initialize_config(__mode__='fit') grid_search( save_results=arguments['--save-results'], persist_best=arguments['--persist-best'], )
[ "def", "grid_search_cmd", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "# pragma: no cover", "arguments", "=", "docopt", "(", "grid_search_cmd", ".", "__doc__", ",", "argv", "=", "argv", ")", "initialize_config", "(", "__mode__", "="...
\ Grid search parameters for the model. Uses 'dataset_loader_train', 'model', and 'grid_search' from the configuration to load a training dataset, and run a grid search on the model using the grid of hyperparameters. Usage: pld-grid-search [options] Options: --save-results=<fname> Save results to CSV file --persist-best Persist the best model from grid search -h --help Show this screen.
[ "\\", "Grid", "search", "parameters", "for", "the", "model", "." ]
f3a4372fba809efbd8da7c979a8c6faff04684dd
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/fit.py#L250-L271
train
206,471
crocs-muni/roca
roca/detect.py
RocaFingerprinter.switch_fingerprint_method
def switch_fingerprint_method(self, old=False): """ Switches main fingerprinting method. :param old: if True old fingerprinting method will be used. :return: """ if old: self.has_fingerprint = self.has_fingerprint_moduli else: self.has_fingerprint = self.has_fingerprint_dlog
python
def switch_fingerprint_method(self, old=False): """ Switches main fingerprinting method. :param old: if True old fingerprinting method will be used. :return: """ if old: self.has_fingerprint = self.has_fingerprint_moduli else: self.has_fingerprint = self.has_fingerprint_dlog
[ "def", "switch_fingerprint_method", "(", "self", ",", "old", "=", "False", ")", ":", "if", "old", ":", "self", ".", "has_fingerprint", "=", "self", ".", "has_fingerprint_moduli", "else", ":", "self", ".", "has_fingerprint", "=", "self", ".", "has_fingerprint_d...
Switches main fingerprinting method. :param old: if True old fingerprinting method will be used. :return:
[ "Switches", "main", "fingerprinting", "method", "." ]
74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L971-L981
train
206,472
J535D165/recordlinkage
recordlinkage/algorithms/indexing.py
_map_tril_1d_on_2d
def _map_tril_1d_on_2d(indices, dims): """Map 1d indices on lower triangular matrix in 2d. """ N = (dims * dims - dims) / 2 m = np.ceil(np.sqrt(2 * N)) c = m - np.round(np.sqrt(2 * (N - indices))) - 1 r = np.mod(indices + (c + 1) * (c + 2) / 2 - 1, m) + 1 return np.array([r, c], dtype=np.int64)
python
def _map_tril_1d_on_2d(indices, dims): """Map 1d indices on lower triangular matrix in 2d. """ N = (dims * dims - dims) / 2 m = np.ceil(np.sqrt(2 * N)) c = m - np.round(np.sqrt(2 * (N - indices))) - 1 r = np.mod(indices + (c + 1) * (c + 2) / 2 - 1, m) + 1 return np.array([r, c], dtype=np.int64)
[ "def", "_map_tril_1d_on_2d", "(", "indices", ",", "dims", ")", ":", "N", "=", "(", "dims", "*", "dims", "-", "dims", ")", "/", "2", "m", "=", "np", ".", "ceil", "(", "np", ".", "sqrt", "(", "2", "*", "N", ")", ")", "c", "=", "m", "-", "np",...
Map 1d indices on lower triangular matrix in 2d.
[ "Map", "1d", "indices", "on", "lower", "triangular", "matrix", "in", "2d", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/indexing.py#L8-L17
train
206,473
J535D165/recordlinkage
recordlinkage/algorithms/indexing.py
_unique_rows_numpy
def _unique_rows_numpy(a): """return unique rows""" a = np.ascontiguousarray(a) unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1])) return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
python
def _unique_rows_numpy(a): """return unique rows""" a = np.ascontiguousarray(a) unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1])) return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
[ "def", "_unique_rows_numpy", "(", "a", ")", ":", "a", "=", "np", ".", "ascontiguousarray", "(", "a", ")", "unique_a", "=", "np", ".", "unique", "(", "a", ".", "view", "(", "[", "(", "''", ",", "a", ".", "dtype", ")", "]", "*", "a", ".", "shape"...
return unique rows
[ "return", "unique", "rows" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/indexing.py#L20-L24
train
206,474
J535D165/recordlinkage
recordlinkage/algorithms/indexing.py
random_pairs_with_replacement
def random_pairs_with_replacement(n, shape, random_state=None): """make random record pairs""" if not isinstance(random_state, np.random.RandomState): random_state = np.random.RandomState(random_state) n_max = max_pairs(shape) if n_max <= 0: raise ValueError('n_max must be larger than 0') # make random pairs indices = random_state.randint(0, n_max, n) if len(shape) == 1: return _map_tril_1d_on_2d(indices, shape[0]) else: return np.unravel_index(indices, shape)
python
def random_pairs_with_replacement(n, shape, random_state=None): """make random record pairs""" if not isinstance(random_state, np.random.RandomState): random_state = np.random.RandomState(random_state) n_max = max_pairs(shape) if n_max <= 0: raise ValueError('n_max must be larger than 0') # make random pairs indices = random_state.randint(0, n_max, n) if len(shape) == 1: return _map_tril_1d_on_2d(indices, shape[0]) else: return np.unravel_index(indices, shape)
[ "def", "random_pairs_with_replacement", "(", "n", ",", "shape", ",", "random_state", "=", "None", ")", ":", "if", "not", "isinstance", "(", "random_state", ",", "np", ".", "random", ".", "RandomState", ")", ":", "random_state", "=", "np", ".", "random", "....
make random record pairs
[ "make", "random", "record", "pairs" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/indexing.py#L27-L44
train
206,475
J535D165/recordlinkage
recordlinkage/algorithms/indexing.py
random_pairs_without_replacement_large_frames
def random_pairs_without_replacement_large_frames( n, shape, random_state=None): """Make a sample of random pairs with replacement""" n_max = max_pairs(shape) sample = np.array([]) # Run as long as the number of pairs is less than the requested number # of pairs n. while len(sample) < n: # The number of pairs to sample (sample twice as much record pairs # because the duplicates are dropped). n_sample_size = (n - len(sample)) * 2 sample = random_state.randint(n_max, size=n_sample_size) # concatenate pairs and deduplicate pairs_non_unique = np.append(sample, sample) sample = _unique_rows_numpy(pairs_non_unique) # return 2d indices if len(shape) == 1: return _map_tril_1d_on_2d(sample[0:n], shape[0]) else: return np.unravel_index(sample[0:n], shape)
python
def random_pairs_without_replacement_large_frames( n, shape, random_state=None): """Make a sample of random pairs with replacement""" n_max = max_pairs(shape) sample = np.array([]) # Run as long as the number of pairs is less than the requested number # of pairs n. while len(sample) < n: # The number of pairs to sample (sample twice as much record pairs # because the duplicates are dropped). n_sample_size = (n - len(sample)) * 2 sample = random_state.randint(n_max, size=n_sample_size) # concatenate pairs and deduplicate pairs_non_unique = np.append(sample, sample) sample = _unique_rows_numpy(pairs_non_unique) # return 2d indices if len(shape) == 1: return _map_tril_1d_on_2d(sample[0:n], shape[0]) else: return np.unravel_index(sample[0:n], shape)
[ "def", "random_pairs_without_replacement_large_frames", "(", "n", ",", "shape", ",", "random_state", "=", "None", ")", ":", "n_max", "=", "max_pairs", "(", "shape", ")", "sample", "=", "np", ".", "array", "(", "[", "]", ")", "# Run as long as the number of pairs...
Make a sample of random pairs with replacement
[ "Make", "a", "sample", "of", "random", "pairs", "with", "replacement" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/indexing.py#L69-L94
train
206,476
J535D165/recordlinkage
recordlinkage/preprocessing/cleaning.py
clean
def clean(s, lowercase=True, replace_by_none=r'[^ \-\_A-Za-z0-9]+', replace_by_whitespace=r'[\-\_]', strip_accents=None, remove_brackets=True, encoding='utf-8', decode_error='strict'): """Clean string variables. Clean strings in the Series by removing unwanted tokens, whitespace and brackets. Parameters ---------- s : pandas.Series A Series to clean. lower : bool, optional Convert strings in the Series to lowercase. Default True. replace_by_none : str, optional The matches of this regular expression are replaced by ''. replace_by_whitespace : str, optional The matches of this regular expression are replaced by a whitespace. remove_brackets : bool, optional Remove all content between brackets and the bracket themselves. Default True. strip_accents : {'ascii', 'unicode', None}, optional Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. encoding : str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error : {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Example ------- >>> import pandas >>> from recordlinkage.preprocessing import clean >>> >>> names = ['Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)', None] >>> s = pandas.Series(names) >>> print(clean(s)) 0 mary ann 1 bob 2 angel 3 bob 4 NaN dtype: object Returns ------- pandas.Series: A cleaned Series of strings. 
""" if s.shape[0] == 0: return s # Lower s if lower is True if lowercase is True: s = s.str.lower() # Accent stripping based on https://github.com/scikit-learn/ # scikit-learn/blob/412996f/sklearn/feature_extraction/text.py # BSD license if not strip_accents: pass elif callable(strip_accents): strip_accents_fn = strip_accents elif strip_accents == 'ascii': strip_accents_fn = strip_accents_ascii elif strip_accents == 'unicode': strip_accents_fn = strip_accents_unicode else: raise ValueError( "Invalid value for 'strip_accents': {}".format(strip_accents) ) # Remove accents etc if strip_accents: def strip_accents_fn_wrapper(x): if sys.version_info[0] >= 3: if isinstance(x, str): return strip_accents_fn(x) else: return x else: if isinstance(x, unicode): # noqa return strip_accents_fn(x) else: return x # encoding s = s.apply( lambda x: x.decode(encoding, decode_error) if type(x) == bytes else x) s = s.map(lambda x: strip_accents_fn_wrapper(x)) # Remove all content between brackets if remove_brackets is True: s = s.str.replace(r'(\[.*?\]|\(.*?\)|\{.*?\})', '') # Remove the special characters if replace_by_none: s = s.str.replace(replace_by_none, '') if replace_by_whitespace: s = s.str.replace(replace_by_whitespace, ' ') # Remove multiple whitespaces s = s.str.replace(r'\s\s+', ' ') # Strip s s = s.str.lstrip().str.rstrip() return s
python
def clean(s, lowercase=True, replace_by_none=r'[^ \-\_A-Za-z0-9]+', replace_by_whitespace=r'[\-\_]', strip_accents=None, remove_brackets=True, encoding='utf-8', decode_error='strict'): """Clean string variables. Clean strings in the Series by removing unwanted tokens, whitespace and brackets. Parameters ---------- s : pandas.Series A Series to clean. lower : bool, optional Convert strings in the Series to lowercase. Default True. replace_by_none : str, optional The matches of this regular expression are replaced by ''. replace_by_whitespace : str, optional The matches of this regular expression are replaced by a whitespace. remove_brackets : bool, optional Remove all content between brackets and the bracket themselves. Default True. strip_accents : {'ascii', 'unicode', None}, optional Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. encoding : str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error : {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Example ------- >>> import pandas >>> from recordlinkage.preprocessing import clean >>> >>> names = ['Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)', None] >>> s = pandas.Series(names) >>> print(clean(s)) 0 mary ann 1 bob 2 angel 3 bob 4 NaN dtype: object Returns ------- pandas.Series: A cleaned Series of strings. 
""" if s.shape[0] == 0: return s # Lower s if lower is True if lowercase is True: s = s.str.lower() # Accent stripping based on https://github.com/scikit-learn/ # scikit-learn/blob/412996f/sklearn/feature_extraction/text.py # BSD license if not strip_accents: pass elif callable(strip_accents): strip_accents_fn = strip_accents elif strip_accents == 'ascii': strip_accents_fn = strip_accents_ascii elif strip_accents == 'unicode': strip_accents_fn = strip_accents_unicode else: raise ValueError( "Invalid value for 'strip_accents': {}".format(strip_accents) ) # Remove accents etc if strip_accents: def strip_accents_fn_wrapper(x): if sys.version_info[0] >= 3: if isinstance(x, str): return strip_accents_fn(x) else: return x else: if isinstance(x, unicode): # noqa return strip_accents_fn(x) else: return x # encoding s = s.apply( lambda x: x.decode(encoding, decode_error) if type(x) == bytes else x) s = s.map(lambda x: strip_accents_fn_wrapper(x)) # Remove all content between brackets if remove_brackets is True: s = s.str.replace(r'(\[.*?\]|\(.*?\)|\{.*?\})', '') # Remove the special characters if replace_by_none: s = s.str.replace(replace_by_none, '') if replace_by_whitespace: s = s.str.replace(replace_by_whitespace, ' ') # Remove multiple whitespaces s = s.str.replace(r'\s\s+', ' ') # Strip s s = s.str.lstrip().str.rstrip() return s
[ "def", "clean", "(", "s", ",", "lowercase", "=", "True", ",", "replace_by_none", "=", "r'[^ \\-\\_A-Za-z0-9]+'", ",", "replace_by_whitespace", "=", "r'[\\-\\_]'", ",", "strip_accents", "=", "None", ",", "remove_brackets", "=", "True", ",", "encoding", "=", "'utf...
Clean string variables. Clean strings in the Series by removing unwanted tokens, whitespace and brackets. Parameters ---------- s : pandas.Series A Series to clean. lower : bool, optional Convert strings in the Series to lowercase. Default True. replace_by_none : str, optional The matches of this regular expression are replaced by ''. replace_by_whitespace : str, optional The matches of this regular expression are replaced by a whitespace. remove_brackets : bool, optional Remove all content between brackets and the bracket themselves. Default True. strip_accents : {'ascii', 'unicode', None}, optional Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. encoding : str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error : {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Example ------- >>> import pandas >>> from recordlinkage.preprocessing import clean >>> >>> names = ['Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)', None] >>> s = pandas.Series(names) >>> print(clean(s)) 0 mary ann 1 bob 2 angel 3 bob 4 NaN dtype: object Returns ------- pandas.Series: A cleaned Series of strings.
[ "Clean", "string", "variables", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/preprocessing/cleaning.py#L11-L133
train
206,477
J535D165/recordlinkage
recordlinkage/preprocessing/cleaning.py
value_occurence
def value_occurence(s): """Count the number of times each value occurs. This function returns the counts for each row, in contrast with `pandas.value_counts <http://pandas.pydata.org/pandas- docs/stable/generated/pandas.Series.value_counts.html>`_. Returns ------- pandas.Series A Series with value counts. """ # https://github.com/pydata/pandas/issues/3729 value_count = s.fillna('NAN') return value_count.groupby(by=value_count).transform('count')
python
def value_occurence(s): """Count the number of times each value occurs. This function returns the counts for each row, in contrast with `pandas.value_counts <http://pandas.pydata.org/pandas- docs/stable/generated/pandas.Series.value_counts.html>`_. Returns ------- pandas.Series A Series with value counts. """ # https://github.com/pydata/pandas/issues/3729 value_count = s.fillna('NAN') return value_count.groupby(by=value_count).transform('count')
[ "def", "value_occurence", "(", "s", ")", ":", "# https://github.com/pydata/pandas/issues/3729", "value_count", "=", "s", ".", "fillna", "(", "'NAN'", ")", "return", "value_count", ".", "groupby", "(", "by", "=", "value_count", ")", ".", "transform", "(", "'count...
Count the number of times each value occurs. This function returns the counts for each row, in contrast with `pandas.value_counts <http://pandas.pydata.org/pandas- docs/stable/generated/pandas.Series.value_counts.html>`_. Returns ------- pandas.Series A Series with value counts.
[ "Count", "the", "number", "of", "times", "each", "value", "occurs", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/preprocessing/cleaning.py#L157-L174
train
206,478
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
safe_sparse_dot
def safe_sparse_dot(a, b, dense_output=False): """Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy.dot where possible to avoid unnecessary copies. Parameters ---------- a : array or sparse matrix b : array or sparse matrix dense_output : boolean, default False When False, either ``a`` or ``b`` being sparse will yield sparse output. When True, output will always be an array. Returns ------- dot_product : array or sparse matrix sparse if ``a`` or ``b`` is sparse and ``dense_output=False``. """ if issparse(a) or issparse(b): ret = a * b if dense_output and hasattr(ret, "toarray"): ret = ret.toarray() return ret else: return np.dot(a, b)
python
def safe_sparse_dot(a, b, dense_output=False): """Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy.dot where possible to avoid unnecessary copies. Parameters ---------- a : array or sparse matrix b : array or sparse matrix dense_output : boolean, default False When False, either ``a`` or ``b`` being sparse will yield sparse output. When True, output will always be an array. Returns ------- dot_product : array or sparse matrix sparse if ``a`` or ``b`` is sparse and ``dense_output=False``. """ if issparse(a) or issparse(b): ret = a * b if dense_output and hasattr(ret, "toarray"): ret = ret.toarray() return ret else: return np.dot(a, b)
[ "def", "safe_sparse_dot", "(", "a", ",", "b", ",", "dense_output", "=", "False", ")", ":", "if", "issparse", "(", "a", ")", "or", "issparse", "(", "b", ")", ":", "ret", "=", "a", "*", "b", "if", "dense_output", "and", "hasattr", "(", "ret", ",", ...
Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy.dot where possible to avoid unnecessary copies. Parameters ---------- a : array or sparse matrix b : array or sparse matrix dense_output : boolean, default False When False, either ``a`` or ``b`` being sparse will yield sparse output. When True, output will always be an array. Returns ------- dot_product : array or sparse matrix sparse if ``a`` or ``b`` is sparse and ``dense_output=False``.
[ "Dot", "product", "that", "handle", "the", "sparse", "matrix", "case", "correctly", "Uses", "BLAS", "GEMM", "as", "replacement", "for", "numpy", ".", "dot", "where", "possible", "to", "avoid", "unnecessary", "copies", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L78-L101
train
206,479
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
BaseNB._joint_log_likelihood
def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') X_bin = self._transform_data(X) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X_bin.shape if n_features_X != n_features: raise ValueError( "Expected input with %d features, got %d instead" % (n_features, n_features_X)) # see chapter 4.1 of http://www.cs.columbia.edu/~mcollins/em.pdf # implementation as in Formula 4. jll = safe_sparse_dot(X_bin, self.feature_log_prob_.T) jll += self.class_log_prior_ return jll
python
def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') X_bin = self._transform_data(X) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X_bin.shape if n_features_X != n_features: raise ValueError( "Expected input with %d features, got %d instead" % (n_features, n_features_X)) # see chapter 4.1 of http://www.cs.columbia.edu/~mcollins/em.pdf # implementation as in Formula 4. jll = safe_sparse_dot(X_bin, self.feature_log_prob_.T) jll += self.class_log_prior_ return jll
[ "def", "_joint_log_likelihood", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "\"classes_\"", ")", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csr'", ")", "X_bin", "=", "self", ".", "_transform_data", "(", "X", ...
Calculate the posterior log probability of the samples X
[ "Calculate", "the", "posterior", "log", "probability", "of", "the", "samples", "X" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L113-L134
train
206,480
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
BaseNB.predict
def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X """ jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)]
python
def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X """ jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)]
[ "def", "predict", "(", "self", ",", "X", ")", ":", "jll", "=", "self", ".", "_joint_log_likelihood", "(", "X", ")", "return", "self", ".", "classes_", "[", "np", ".", "argmax", "(", "jll", ",", "axis", "=", "1", ")", "]" ]
Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X
[ "Perform", "classification", "on", "an", "array", "of", "test", "vectors", "X", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L136-L150
train
206,481
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
BaseNB.predict_log_proba
def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) # return shape = (2,) return jll - np.atleast_2d(log_prob_x).T
python
def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) # return shape = (2,) return jll - np.atleast_2d(log_prob_x).T
[ "def", "predict_log_proba", "(", "self", ",", "X", ")", ":", "jll", "=", "self", ".", "_joint_log_likelihood", "(", "X", ")", "# normalize by P(x) = P(f_1, ..., f_n)", "log_prob_x", "=", "logsumexp", "(", "jll", ",", "axis", "=", "1", ")", "# return shape = (2,)...
Return log-probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`.
[ "Return", "log", "-", "probability", "estimates", "for", "the", "test", "vector", "X", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L152-L172
train
206,482
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
NaiveBayes._count
def _count(self, X, Y): """Count and smooth feature occurrences.""" self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0)
python
def _count(self, X, Y): """Count and smooth feature occurrences.""" self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0)
[ "def", "_count", "(", "self", ",", "X", ",", "Y", ")", ":", "self", ".", "feature_count_", "+=", "safe_sparse_dot", "(", "Y", ".", "T", ",", "X", ")", "self", ".", "class_count_", "+=", "Y", ".", "sum", "(", "axis", "=", "0", ")" ]
Count and smooth feature occurrences.
[ "Count", "and", "smooth", "feature", "occurrences", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L298-L302
train
206,483
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
NaiveBayes._update_feature_log_prob
def _update_feature_log_prob(self, alpha): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + alpha smoothed_cc = self.class_count_ + alpha * 2 self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1)))
python
def _update_feature_log_prob(self, alpha): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + alpha smoothed_cc = self.class_count_ + alpha * 2 self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1)))
[ "def", "_update_feature_log_prob", "(", "self", ",", "alpha", ")", ":", "smoothed_fc", "=", "self", ".", "feature_count_", "+", "alpha", "smoothed_cc", "=", "self", ".", "class_count_", "+", "alpha", "*", "2", "self", ".", "feature_log_prob_", "=", "(", "np"...
Apply smoothing to raw counts and recompute log probabilities
[ "Apply", "smoothing", "to", "raw", "counts", "and", "recompute", "log", "probabilities" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L304-L310
train
206,484
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
NaiveBayes.fit
def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- self : object """ X, y = check_X_y(X, y, 'csr') # Transform data with a label binarizer. Each column will get # transformed into a N columns (for each distinct value a column). For # a situation with 0 and 1 outcome values, the result given two # columns. X_bin = self._fit_data(X) _, n_features = X_bin.shape # prepare Y labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) self._count(X_bin, Y) alpha = self._check_alpha() self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) return self
python
def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- self : object """ X, y = check_X_y(X, y, 'csr') # Transform data with a label binarizer. Each column will get # transformed into a N columns (for each distinct value a column). For # a situation with 0 and 1 outcome values, the result given two # columns. X_bin = self._fit_data(X) _, n_features = X_bin.shape # prepare Y labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) self._count(X_bin, Y) alpha = self._check_alpha() self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "sample_weight", "=", "None", ")", ":", "X", ",", "y", "=", "check_X_y", "(", "X", ",", "y", ",", "'csr'", ")", "# Transform data with a label binarizer. Each column will get", "# transformed into a N columns (...
Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- self : object
[ "Fit", "Naive", "Bayes", "classifier", "according", "to", "X", "y" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L340-L394
train
206,485
J535D165/recordlinkage
recordlinkage/algorithms/nb_sklearn.py
ECM.fit
def fit(self, X): """Fit ECM classifier according to X Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse='csr') # count frequencies of elements in vector space # based on https://stackoverflow.com/a/33235665 # faster than numpy.unique X_unique, X_freq = np.unique(X, axis=0, return_counts=True) X_freq = np.atleast_2d(X_freq) # Transform data with a label binarizer. Each column will get # transformed into a N columns (for each distinct value a column). For # a situation with 0 and 1 outcome values, the result given two # columns. X_unique_bin = self._fit_data(X_unique) _, n_features = X_unique_bin.shape # initialise parameters self.classes_ = np.array([0, 1]) if is_string_like(self.init) and self.init == 'random': self.class_log_prior_, self.feature_log_prob_ = \ self._init_parameters_random(X_unique_bin) elif is_string_like(self.init) and self.init == 'jaro': self.class_log_prior_, self.feature_log_prob_ = \ self._init_parameters_jaro(X_unique_bin) else: raise ValueError("'{}' is not a valid value for " "argument 'init'".format(self.init)) iteration = 0 stop_iteration = False self._log_class_log_prior = np.atleast_2d(self.class_log_prior_) self._log_feature_log_prob = np.atleast_3d(self.feature_log_prob_) while iteration < self.max_iter and not stop_iteration: # expectation step g = self.predict_proba(X_unique) g_freq = g * X_freq.T g_freq_sum = g_freq.sum(axis=0) # maximisation step class_log_prior_ = np.log(g_freq_sum) - np.log(X.shape[0]) # p feature_log_prob_ = np.log(safe_sparse_dot(g_freq.T, X_unique_bin)) feature_log_prob_ -= np.log(np.atleast_2d(g_freq_sum).T) # Stop iterating when the class prior and feature probs are close # to the values in the to previous iteration (parameters starting # with 'self'). 
class_log_prior_close = np.allclose( class_log_prior_, self.class_log_prior_, atol=self.atol) feature_log_prob_close = np.allclose( feature_log_prob_, self.feature_log_prob_, atol=self.atol) if (class_log_prior_close and feature_log_prob_close): stop_iteration = True if np.all(np.isnan(feature_log_prob_)): stop_iteration = True # Update the class prior and feature probs. self.class_log_prior_ = class_log_prior_ self.feature_log_prob_ = feature_log_prob_ # create logs self._log_class_log_prior = np.concatenate( [self._log_class_log_prior, np.atleast_2d(self.class_log_prior_)] ) self._log_feature_log_prob = np.concatenate( [self._log_feature_log_prob, np.atleast_3d(self.feature_log_prob_)], axis=2 ) # Increment counter iteration += 1 return self
python
def fit(self, X): """Fit ECM classifier according to X Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse='csr') # count frequencies of elements in vector space # based on https://stackoverflow.com/a/33235665 # faster than numpy.unique X_unique, X_freq = np.unique(X, axis=0, return_counts=True) X_freq = np.atleast_2d(X_freq) # Transform data with a label binarizer. Each column will get # transformed into a N columns (for each distinct value a column). For # a situation with 0 and 1 outcome values, the result given two # columns. X_unique_bin = self._fit_data(X_unique) _, n_features = X_unique_bin.shape # initialise parameters self.classes_ = np.array([0, 1]) if is_string_like(self.init) and self.init == 'random': self.class_log_prior_, self.feature_log_prob_ = \ self._init_parameters_random(X_unique_bin) elif is_string_like(self.init) and self.init == 'jaro': self.class_log_prior_, self.feature_log_prob_ = \ self._init_parameters_jaro(X_unique_bin) else: raise ValueError("'{}' is not a valid value for " "argument 'init'".format(self.init)) iteration = 0 stop_iteration = False self._log_class_log_prior = np.atleast_2d(self.class_log_prior_) self._log_feature_log_prob = np.atleast_3d(self.feature_log_prob_) while iteration < self.max_iter and not stop_iteration: # expectation step g = self.predict_proba(X_unique) g_freq = g * X_freq.T g_freq_sum = g_freq.sum(axis=0) # maximisation step class_log_prior_ = np.log(g_freq_sum) - np.log(X.shape[0]) # p feature_log_prob_ = np.log(safe_sparse_dot(g_freq.T, X_unique_bin)) feature_log_prob_ -= np.log(np.atleast_2d(g_freq_sum).T) # Stop iterating when the class prior and feature probs are close # to the values in the to previous iteration (parameters starting # with 'self'). 
class_log_prior_close = np.allclose( class_log_prior_, self.class_log_prior_, atol=self.atol) feature_log_prob_close = np.allclose( feature_log_prob_, self.feature_log_prob_, atol=self.atol) if (class_log_prior_close and feature_log_prob_close): stop_iteration = True if np.all(np.isnan(feature_log_prob_)): stop_iteration = True # Update the class prior and feature probs. self.class_log_prior_ = class_log_prior_ self.feature_log_prob_ = feature_log_prob_ # create logs self._log_class_log_prior = np.concatenate( [self._log_class_log_prior, np.atleast_2d(self.class_log_prior_)] ) self._log_feature_log_prob = np.concatenate( [self._log_feature_log_prob, np.atleast_3d(self.feature_log_prob_)], axis=2 ) # Increment counter iteration += 1 return self
[ "def", "fit", "(", "self", ",", "X", ")", ":", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csr'", ")", "# count frequencies of elements in vector space", "# based on https://stackoverflow.com/a/33235665", "# faster than numpy.unique", "X_unique", ",", ...
Fit ECM classifier according to X Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns self.
[ "Fit", "ECM", "classifier", "according", "to", "X" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L518-L606
train
206,486
J535D165/recordlinkage
recordlinkage/index.py
SortedNeighbourhood._get_sorting_key_values
def _get_sorting_key_values(self, array1, array2): """return the sorting key values as a series""" concat_arrays = numpy.concatenate([array1, array2]) unique_values = numpy.unique(concat_arrays) return numpy.sort(unique_values)
python
def _get_sorting_key_values(self, array1, array2): """return the sorting key values as a series""" concat_arrays = numpy.concatenate([array1, array2]) unique_values = numpy.unique(concat_arrays) return numpy.sort(unique_values)
[ "def", "_get_sorting_key_values", "(", "self", ",", "array1", ",", "array2", ")", ":", "concat_arrays", "=", "numpy", ".", "concatenate", "(", "[", "array1", ",", "array2", "]", ")", "unique_values", "=", "numpy", ".", "unique", "(", "concat_arrays", ")", ...
return the sorting key values as a series
[ "return", "the", "sorting", "key", "values", "as", "a", "series" ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/index.py#L255-L261
train
206,487
J535D165/recordlinkage
recordlinkage/network.py
ConnectedComponents.compute
def compute(self, links): """Return the connected components. Parameters ---------- links : pandas.MultiIndex The links to apply one-to-one matching on. Returns ------- list of pandas.MultiIndex A list with pandas.MultiIndex objects. Each MultiIndex object represents a set of connected record pairs. """ try: import networkx as nx except ImportError(): raise Exception("'networkx' module is needed for this operation") G = nx.Graph() G.add_edges_from(links.values) connected_components = nx.connected_component_subgraphs(G) links_result = [pd.MultiIndex.from_tuples(subgraph.edges()) for subgraph in connected_components] return links_result
python
def compute(self, links): """Return the connected components. Parameters ---------- links : pandas.MultiIndex The links to apply one-to-one matching on. Returns ------- list of pandas.MultiIndex A list with pandas.MultiIndex objects. Each MultiIndex object represents a set of connected record pairs. """ try: import networkx as nx except ImportError(): raise Exception("'networkx' module is needed for this operation") G = nx.Graph() G.add_edges_from(links.values) connected_components = nx.connected_component_subgraphs(G) links_result = [pd.MultiIndex.from_tuples(subgraph.edges()) for subgraph in connected_components] return links_result
[ "def", "compute", "(", "self", ",", "links", ")", ":", "try", ":", "import", "networkx", "as", "nx", "except", "ImportError", "(", ")", ":", "raise", "Exception", "(", "\"'networkx' module is needed for this operation\"", ")", "G", "=", "nx", ".", "Graph", "...
Return the connected components. Parameters ---------- links : pandas.MultiIndex The links to apply one-to-one matching on. Returns ------- list of pandas.MultiIndex A list with pandas.MultiIndex objects. Each MultiIndex object represents a set of connected record pairs.
[ "Return", "the", "connected", "components", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/network.py#L168-L196
train
206,488
J535D165/recordlinkage
recordlinkage/adapters.py
SKLearnAdapter._prob_match
def _prob_match(self, features): """Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties. """ # compute the probabilities probs = self.kernel.predict_proba(features) # get the position of match probabilities classes = list(self.kernel.classes_) match_class_position = classes.index(1) return probs[:, match_class_position]
python
def _prob_match(self, features): """Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties. """ # compute the probabilities probs = self.kernel.predict_proba(features) # get the position of match probabilities classes = list(self.kernel.classes_) match_class_position = classes.index(1) return probs[:, match_class_position]
[ "def", "_prob_match", "(", "self", ",", "features", ")", ":", "# compute the probabilities", "probs", "=", "self", ".", "kernel", ".", "predict_proba", "(", "features", ")", "# get the position of match probabilities", "classes", "=", "list", "(", "self", ".", "ke...
Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties.
[ "Compute", "match", "probabilities", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/adapters.py#L57-L78
train
206,489
J535D165/recordlinkage
recordlinkage/adapters.py
KerasAdapter._predict
def _predict(self, features): """Predict matches and non-matches. Parameters ---------- features : numpy.ndarray The data to predict the class of. Returns ------- numpy.ndarray The predicted classes. """ from sklearn.exceptions import NotFittedError try: prediction = self.kernel.predict_classes(features)[:, 0] except NotFittedError: raise NotFittedError( "{} is not fitted yet. Call 'fit' with appropriate " "arguments before using this method.".format( type(self).__name__ ) ) return prediction
python
def _predict(self, features): """Predict matches and non-matches. Parameters ---------- features : numpy.ndarray The data to predict the class of. Returns ------- numpy.ndarray The predicted classes. """ from sklearn.exceptions import NotFittedError try: prediction = self.kernel.predict_classes(features)[:, 0] except NotFittedError: raise NotFittedError( "{} is not fitted yet. Call 'fit' with appropriate " "arguments before using this method.".format( type(self).__name__ ) ) return prediction
[ "def", "_predict", "(", "self", ",", "features", ")", ":", "from", "sklearn", ".", "exceptions", "import", "NotFittedError", "try", ":", "prediction", "=", "self", ".", "kernel", ".", "predict_classes", "(", "features", ")", "[", ":", ",", "0", "]", "exc...
Predict matches and non-matches. Parameters ---------- features : numpy.ndarray The data to predict the class of. Returns ------- numpy.ndarray The predicted classes.
[ "Predict", "matches", "and", "non", "-", "matches", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/adapters.py#L96-L122
train
206,490
J535D165/recordlinkage
recordlinkage/datasets/febrl.py
_febrl_links
def _febrl_links(df): """Get the links of a FEBRL dataset.""" index = df.index.to_series() keys = index.str.extract(r'rec-(\d+)', expand=True)[0] index_int = numpy.arange(len(df)) df_helper = pandas.DataFrame({ 'key': keys, 'index': index_int }) # merge the two frame and make MultiIndex. pairs_df = df_helper.merge( df_helper, on='key' )[['index_x', 'index_y']] pairs_df = pairs_df[pairs_df['index_x'] > pairs_df['index_y']] return pandas.MultiIndex( levels=[df.index.values, df.index.values], labels=[pairs_df['index_x'].values, pairs_df['index_y'].values], names=[None, None], verify_integrity=False )
python
def _febrl_links(df): """Get the links of a FEBRL dataset.""" index = df.index.to_series() keys = index.str.extract(r'rec-(\d+)', expand=True)[0] index_int = numpy.arange(len(df)) df_helper = pandas.DataFrame({ 'key': keys, 'index': index_int }) # merge the two frame and make MultiIndex. pairs_df = df_helper.merge( df_helper, on='key' )[['index_x', 'index_y']] pairs_df = pairs_df[pairs_df['index_x'] > pairs_df['index_y']] return pandas.MultiIndex( levels=[df.index.values, df.index.values], labels=[pairs_df['index_x'].values, pairs_df['index_y'].values], names=[None, None], verify_integrity=False )
[ "def", "_febrl_links", "(", "df", ")", ":", "index", "=", "df", ".", "index", ".", "to_series", "(", ")", "keys", "=", "index", ".", "str", ".", "extract", "(", "r'rec-(\\d+)'", ",", "expand", "=", "True", ")", "[", "0", "]", "index_int", "=", "num...
Get the links of a FEBRL dataset.
[ "Get", "the", "links", "of", "a", "FEBRL", "dataset", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/febrl.py#L28-L52
train
206,491
J535D165/recordlinkage
recordlinkage/datasets/febrl.py
load_febrl1
def load_febrl1(return_links=False): """Load the FEBRL 1 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the first Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 1000 records (500 original and 500 duplicates, with exactly one duplicate per original record."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset1.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix. """ df = _febrl_load_data('dataset1.csv') if return_links: links = _febrl_links(df) return df, links else: return df
python
def load_febrl1(return_links=False): """Load the FEBRL 1 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the first Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 1000 records (500 original and 500 duplicates, with exactly one duplicate per original record."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset1.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix. """ df = _febrl_load_data('dataset1.csv') if return_links: links = _febrl_links(df) return df, links else: return df
[ "def", "load_febrl1", "(", "return_links", "=", "False", ")", ":", "df", "=", "_febrl_load_data", "(", "'dataset1.csv'", ")", "if", "return_links", ":", "links", "=", "_febrl_links", "(", "df", ")", "return", "df", ",", "links", "else", ":", "return", "df"...
Load the FEBRL 1 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the first Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 1000 records (500 original and 500 duplicates, with exactly one duplicate per original record."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset1.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix.
[ "Load", "the", "FEBRL", "1", "dataset", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/febrl.py#L55-L88
train
206,492
J535D165/recordlinkage
recordlinkage/datasets/febrl.py
load_febrl2
def load_febrl2(return_links=False): """Load the FEBRL 2 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the second Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 5000 records (4000 originals and 1000 duplicates), with a maximum of 5 duplicates based on one original record (and a poisson distribution of duplicate records). Distribution of duplicates: 19 originals records have 5 duplicate records 47 originals records have 4 duplicate records 107 originals records have 3 duplicate records 141 originals records have 2 duplicate records 114 originals records have 1 duplicate record 572 originals records have no duplicate record"* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset2.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix. """ df = _febrl_load_data('dataset2.csv') if return_links: links = _febrl_links(df) return df, links else: return df
python
def load_febrl2(return_links=False): """Load the FEBRL 2 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the second Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 5000 records (4000 originals and 1000 duplicates), with a maximum of 5 duplicates based on one original record (and a poisson distribution of duplicate records). Distribution of duplicates: 19 originals records have 5 duplicate records 47 originals records have 4 duplicate records 107 originals records have 3 duplicate records 141 originals records have 2 duplicate records 114 originals records have 1 duplicate record 572 originals records have no duplicate record"* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset2.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix. """ df = _febrl_load_data('dataset2.csv') if return_links: links = _febrl_links(df) return df, links else: return df
[ "def", "load_febrl2", "(", "return_links", "=", "False", ")", ":", "df", "=", "_febrl_load_data", "(", "'dataset2.csv'", ")", "if", "return_links", ":", "links", "=", "_febrl_links", "(", "df", ")", "return", "df", ",", "links", "else", ":", "return", "df"...
Load the FEBRL 2 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the second Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 5000 records (4000 originals and 1000 duplicates), with a maximum of 5 duplicates based on one original record (and a poisson distribution of duplicate records). Distribution of duplicates: 19 originals records have 5 duplicate records 47 originals records have 4 duplicate records 107 originals records have 3 duplicate records 141 originals records have 2 duplicate records 114 originals records have 1 duplicate record 572 originals records have no duplicate record"* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset2.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix.
[ "Load", "the", "FEBRL", "2", "dataset", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/febrl.py#L91-L131
train
206,493
J535D165/recordlinkage
recordlinkage/datasets/febrl.py
load_febrl3
def load_febrl3(return_links=False): """Load the FEBRL 3 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the third Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 5000 records (2000 originals and 3000 duplicates), with a maximum of 5 duplicates based on one original record (and a Zipf distribution of duplicate records). Distribution of duplicates: 168 originals records have 5 duplicate records 161 originals records have 4 duplicate records 212 originals records have 3 duplicate records 256 originals records have 2 duplicate records 368 originals records have 1 duplicate record 1835 originals records have no duplicate record"* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset3.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix. """ df = _febrl_load_data('dataset3.csv') if return_links: links = _febrl_links(df) return df, links else: return df
python
def load_febrl3(return_links=False): """Load the FEBRL 3 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the third Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 5000 records (2000 originals and 3000 duplicates), with a maximum of 5 duplicates based on one original record (and a Zipf distribution of duplicate records). Distribution of duplicates: 168 originals records have 5 duplicate records 161 originals records have 4 duplicate records 212 originals records have 3 duplicate records 256 originals records have 2 duplicate records 368 originals records have 1 duplicate record 1835 originals records have no duplicate record"* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset3.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix. """ df = _febrl_load_data('dataset3.csv') if return_links: links = _febrl_links(df) return df, links else: return df
[ "def", "load_febrl3", "(", "return_links", "=", "False", ")", ":", "df", "=", "_febrl_load_data", "(", "'dataset3.csv'", ")", "if", "return_links", ":", "links", "=", "_febrl_links", "(", "df", ")", "return", "df", ",", "links", "else", ":", "return", "df"...
Load the FEBRL 3 dataset. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the third Febrl dataset as a :class:`pandas.DataFrame`. *"This data set contains 5000 records (2000 originals and 3000 duplicates), with a maximum of 5 duplicates based on one original record (and a Zipf distribution of duplicate records). Distribution of duplicates: 168 originals records have 5 duplicate records 161 originals records have 4 duplicate records 212 originals records have 3 duplicate records 256 originals records have 2 duplicate records 368 originals records have 1 duplicate record 1835 originals records have no duplicate record"* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- pandas.DataFrame A :class:`pandas.DataFrame` with Febrl dataset3.csv. When return_links is True, the function returns also the true links. The true links are all links in the lower triangular part of the matrix.
[ "Load", "the", "FEBRL", "3", "dataset", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/febrl.py#L134-L174
train
206,494
J535D165/recordlinkage
recordlinkage/datasets/febrl.py
load_febrl4
def load_febrl4(return_links=False): """Load the FEBRL 4 datasets. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the fourth Febrl dataset as a :class:`pandas.DataFrame`. *"Generated as one data set with 10000 records (5000 originals and 5000 duplicates, with one duplicate per original), the originals have been split from the duplicates, into dataset4a.csv (containing the 5000 original records) and dataset4b.csv (containing the 5000 duplicate records) These two data sets can be used for testing linkage procedures."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- (pandas.DataFrame, pandas.DataFrame) A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas dataframe with Febrl dataset4b.csv. When return_links is True, the function returns also the true links. """ df_a = _febrl_load_data('dataset4a.csv') df_b = _febrl_load_data('dataset4b.csv') if return_links: links = pandas.MultiIndex.from_arrays([ ["rec-{}-org".format(i) for i in range(0, 5000)], ["rec-{}-dup-0".format(i) for i in range(0, 5000)]] ) return df_a, df_b, links else: return df_a, df_b
python
def load_febrl4(return_links=False): """Load the FEBRL 4 datasets. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the fourth Febrl dataset as a :class:`pandas.DataFrame`. *"Generated as one data set with 10000 records (5000 originals and 5000 duplicates, with one duplicate per original), the originals have been split from the duplicates, into dataset4a.csv (containing the 5000 original records) and dataset4b.csv (containing the 5000 duplicate records) These two data sets can be used for testing linkage procedures."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- (pandas.DataFrame, pandas.DataFrame) A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas dataframe with Febrl dataset4b.csv. When return_links is True, the function returns also the true links. """ df_a = _febrl_load_data('dataset4a.csv') df_b = _febrl_load_data('dataset4b.csv') if return_links: links = pandas.MultiIndex.from_arrays([ ["rec-{}-org".format(i) for i in range(0, 5000)], ["rec-{}-dup-0".format(i) for i in range(0, 5000)]] ) return df_a, df_b, links else: return df_a, df_b
[ "def", "load_febrl4", "(", "return_links", "=", "False", ")", ":", "df_a", "=", "_febrl_load_data", "(", "'dataset4a.csv'", ")", "df_b", "=", "_febrl_load_data", "(", "'dataset4b.csv'", ")", "if", "return_links", ":", "links", "=", "pandas", ".", "MultiIndex", ...
Load the FEBRL 4 datasets. The Freely Extensible Biomedical Record Linkage (Febrl) package is distributed with a dataset generator and four datasets generated with the generator. This function returns the fourth Febrl dataset as a :class:`pandas.DataFrame`. *"Generated as one data set with 10000 records (5000 originals and 5000 duplicates, with one duplicate per original), the originals have been split from the duplicates, into dataset4a.csv (containing the 5000 original records) and dataset4b.csv (containing the 5000 duplicate records) These two data sets can be used for testing linkage procedures."* Parameters ---------- return_links: bool When True, the function returns also the true links. Returns ------- (pandas.DataFrame, pandas.DataFrame) A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas dataframe with Febrl dataset4b.csv. When return_links is True, the function returns also the true links.
[ "Load", "the", "FEBRL", "4", "datasets", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/febrl.py#L177-L217
train
206,495
J535D165/recordlinkage
recordlinkage/datasets/external.py
load_krebsregister
def load_krebsregister(block=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], missing_values=None, shuffle=True): """Load the Krebsregister dataset. This dataset of comparison patterns was obtained in a epidemiological cancer study in Germany. The comparison patterns were created by the Institute for Medical Biostatistics, Epidemiology and Informatics (IMBEI) and the University Medical Center of Johannes Gutenberg University (Mainz, Germany). The dataset is available for research online. "The records represent individual data including first and family name, sex, date of birth and postal code, which were collected through iterative insertions in the course of several years. The comparison patterns in this data set are based on a sample of 100.000 records dating from 2005 to 2008. Data pairs were classified as "match" or "non-match" during an extensive manual review where several documentarists were involved. The resulting classification formed the basis for assessing the quality of the registry's own record linkage procedure. In order to limit the amount of patterns a blocking procedure was applied, which selects only record pairs that meet specific agreement conditions. The results of the following six blocking iterations were merged together: - Phonetic equality of first name and family name, equality of date of birth. - Phonetic equality of first name, equality of day of birth. - Phonetic equality of first name, equality of month of birth. - Phonetic equality of first name, equality of year of birth. - Equality of complete date of birth. - Phonetic equality of family name, equality of sex. This procedure resulted in 5.749.132 record pairs, of which 20.931 are matches. The data set is split into 10 blocks of (approximately) equal size and ratio of matches to non-matches." Parameters ---------- block : int, list An integer or a list with integers between 1 and 10. The blocks are the blocks explained in the description. 
missing_values : object, int, float The value of the missing values. Default NaN. shuffle : bool Shuffle the record pairs. Default True. Returns ------- (pandas.DataFrame, pandas.MultiIndex) A pandas.DataFrame with comparison vectors and a pandas.MultiIndex with the indices of the matches. """ # If the data is not found, download it. for i in range(1, 11): filepath = os.path.join(os.path.dirname(__file__), 'krebsregister', 'block_{}.zip'.format(i)) if not os.path.exists(filepath): _download_krebsregister() break if isinstance(block, (list, tuple)): data = pandas.concat([_krebsregister_block(bl) for bl in block]) else: data = _krebsregister_block(block) if shuffle: data = data.sample(frac=1, random_state=535) match_index = data.index[data['is_match']] del data['is_match'] if pandas.notnull(missing_values): data.fillna(missing_values, inplace=True) return data, match_index
python
def load_krebsregister(block=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], missing_values=None, shuffle=True): """Load the Krebsregister dataset. This dataset of comparison patterns was obtained in a epidemiological cancer study in Germany. The comparison patterns were created by the Institute for Medical Biostatistics, Epidemiology and Informatics (IMBEI) and the University Medical Center of Johannes Gutenberg University (Mainz, Germany). The dataset is available for research online. "The records represent individual data including first and family name, sex, date of birth and postal code, which were collected through iterative insertions in the course of several years. The comparison patterns in this data set are based on a sample of 100.000 records dating from 2005 to 2008. Data pairs were classified as "match" or "non-match" during an extensive manual review where several documentarists were involved. The resulting classification formed the basis for assessing the quality of the registry's own record linkage procedure. In order to limit the amount of patterns a blocking procedure was applied, which selects only record pairs that meet specific agreement conditions. The results of the following six blocking iterations were merged together: - Phonetic equality of first name and family name, equality of date of birth. - Phonetic equality of first name, equality of day of birth. - Phonetic equality of first name, equality of month of birth. - Phonetic equality of first name, equality of year of birth. - Equality of complete date of birth. - Phonetic equality of family name, equality of sex. This procedure resulted in 5.749.132 record pairs, of which 20.931 are matches. The data set is split into 10 blocks of (approximately) equal size and ratio of matches to non-matches." Parameters ---------- block : int, list An integer or a list with integers between 1 and 10. The blocks are the blocks explained in the description. 
missing_values : object, int, float The value of the missing values. Default NaN. shuffle : bool Shuffle the record pairs. Default True. Returns ------- (pandas.DataFrame, pandas.MultiIndex) A pandas.DataFrame with comparison vectors and a pandas.MultiIndex with the indices of the matches. """ # If the data is not found, download it. for i in range(1, 11): filepath = os.path.join(os.path.dirname(__file__), 'krebsregister', 'block_{}.zip'.format(i)) if not os.path.exists(filepath): _download_krebsregister() break if isinstance(block, (list, tuple)): data = pandas.concat([_krebsregister_block(bl) for bl in block]) else: data = _krebsregister_block(block) if shuffle: data = data.sample(frac=1, random_state=535) match_index = data.index[data['is_match']] del data['is_match'] if pandas.notnull(missing_values): data.fillna(missing_values, inplace=True) return data, match_index
[ "def", "load_krebsregister", "(", "block", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "10", "]", ",", "missing_values", "=", "None", ",", "shuffle", "=", "True", ")", ":", "# If the data ...
Load the Krebsregister dataset. This dataset of comparison patterns was obtained in a epidemiological cancer study in Germany. The comparison patterns were created by the Institute for Medical Biostatistics, Epidemiology and Informatics (IMBEI) and the University Medical Center of Johannes Gutenberg University (Mainz, Germany). The dataset is available for research online. "The records represent individual data including first and family name, sex, date of birth and postal code, which were collected through iterative insertions in the course of several years. The comparison patterns in this data set are based on a sample of 100.000 records dating from 2005 to 2008. Data pairs were classified as "match" or "non-match" during an extensive manual review where several documentarists were involved. The resulting classification formed the basis for assessing the quality of the registry's own record linkage procedure. In order to limit the amount of patterns a blocking procedure was applied, which selects only record pairs that meet specific agreement conditions. The results of the following six blocking iterations were merged together: - Phonetic equality of first name and family name, equality of date of birth. - Phonetic equality of first name, equality of day of birth. - Phonetic equality of first name, equality of month of birth. - Phonetic equality of first name, equality of year of birth. - Equality of complete date of birth. - Phonetic equality of family name, equality of sex. This procedure resulted in 5.749.132 record pairs, of which 20.931 are matches. The data set is split into 10 blocks of (approximately) equal size and ratio of matches to non-matches." Parameters ---------- block : int, list An integer or a list with integers between 1 and 10. The blocks are the blocks explained in the description. missing_values : object, int, float The value of the missing values. Default NaN. shuffle : bool Shuffle the record pairs. Default True. 
Returns ------- (pandas.DataFrame, pandas.MultiIndex) A pandas.DataFrame with comparison vectors and a pandas.MultiIndex with the indices of the matches.
[ "Load", "the", "Krebsregister", "dataset", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/datasets/external.py#L10-L94
train
206,496
J535D165/recordlinkage
recordlinkage/preprocessing/encoding.py
phonetic
def phonetic(s, method, concat=True, encoding='utf-8', decode_error='strict'): """Convert names or strings into phonetic codes. The implemented algorithms are `soundex <https://en.wikipedia.org/wiki/Soundex>`_, `nysiis <https://en.wikipedia.org/wiki/New_York_State_Identification_and_ Intelligence_System>`_, `metaphone <https://en.wikipedia.org/wiki/Metaphone>`_ or `match_rating <https://en.wikipedia.org/wiki/Match_rating_approach>`_. Parameters ---------- s : pandas.Series A pandas.Series with string values (often names) to encode. method: str The algorithm that is used to phonetically encode the values. The possible options are "soundex", "nysiis", "metaphone" or "match_rating". concat: bool, optional Remove whitespace before phonetic encoding. encoding: str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error: {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Returns ------- pandas.Series A Series with phonetic encoded values. """ # encoding if sys.version_info[0] == 2: s = s.apply( lambda x: x.decode(encoding, decode_error) if type(x) == bytes else x) if concat: s = s.str.replace(r"[\-\_\s]", "") for alg in _phonetic_algorithms: if method in alg['argument_names']: phonetic_callback = alg['callback'] break else: raise ValueError("The algorithm '{}' is not known.".format(method)) return s.str.upper().apply( lambda x: phonetic_callback(x) if pandas.notnull(x) else np.nan )
python
def phonetic(s, method, concat=True, encoding='utf-8', decode_error='strict'): """Convert names or strings into phonetic codes. The implemented algorithms are `soundex <https://en.wikipedia.org/wiki/Soundex>`_, `nysiis <https://en.wikipedia.org/wiki/New_York_State_Identification_and_ Intelligence_System>`_, `metaphone <https://en.wikipedia.org/wiki/Metaphone>`_ or `match_rating <https://en.wikipedia.org/wiki/Match_rating_approach>`_. Parameters ---------- s : pandas.Series A pandas.Series with string values (often names) to encode. method: str The algorithm that is used to phonetically encode the values. The possible options are "soundex", "nysiis", "metaphone" or "match_rating". concat: bool, optional Remove whitespace before phonetic encoding. encoding: str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error: {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Returns ------- pandas.Series A Series with phonetic encoded values. """ # encoding if sys.version_info[0] == 2: s = s.apply( lambda x: x.decode(encoding, decode_error) if type(x) == bytes else x) if concat: s = s.str.replace(r"[\-\_\s]", "") for alg in _phonetic_algorithms: if method in alg['argument_names']: phonetic_callback = alg['callback'] break else: raise ValueError("The algorithm '{}' is not known.".format(method)) return s.str.upper().apply( lambda x: phonetic_callback(x) if pandas.notnull(x) else np.nan )
[ "def", "phonetic", "(", "s", ",", "method", ",", "concat", "=", "True", ",", "encoding", "=", "'utf-8'", ",", "decode_error", "=", "'strict'", ")", ":", "# encoding", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "s", "=", "s", "...
Convert names or strings into phonetic codes. The implemented algorithms are `soundex <https://en.wikipedia.org/wiki/Soundex>`_, `nysiis <https://en.wikipedia.org/wiki/New_York_State_Identification_and_ Intelligence_System>`_, `metaphone <https://en.wikipedia.org/wiki/Metaphone>`_ or `match_rating <https://en.wikipedia.org/wiki/Match_rating_approach>`_. Parameters ---------- s : pandas.Series A pandas.Series with string values (often names) to encode. method: str The algorithm that is used to phonetically encode the values. The possible options are "soundex", "nysiis", "metaphone" or "match_rating". concat: bool, optional Remove whitespace before phonetic encoding. encoding: str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error: {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Returns ------- pandas.Series A Series with phonetic encoded values.
[ "Convert", "names", "or", "strings", "into", "phonetic", "codes", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/preprocessing/encoding.py#L37-L91
train
206,497
J535D165/recordlinkage
recordlinkage/api.py
Index.block
def block(self, *args, **kwargs): """Add a block index. Shortcut of :class:`recordlinkage.index.Block`:: from recordlinkage.index import Block indexer = recordlinkage.Index() indexer.add(Block()) """ indexer = Block(*args, **kwargs) self.add(indexer) return self
python
def block(self, *args, **kwargs): """Add a block index. Shortcut of :class:`recordlinkage.index.Block`:: from recordlinkage.index import Block indexer = recordlinkage.Index() indexer.add(Block()) """ indexer = Block(*args, **kwargs) self.add(indexer) return self
[ "def", "block", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "indexer", "=", "Block", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "add", "(", "indexer", ")", "return", "self" ]
Add a block index. Shortcut of :class:`recordlinkage.index.Block`:: from recordlinkage.index import Block indexer = recordlinkage.Index() indexer.add(Block())
[ "Add", "a", "block", "index", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/api.py#L42-L56
train
206,498
J535D165/recordlinkage
recordlinkage/api.py
Index.sortedneighbourhood
def sortedneighbourhood(self, *args, **kwargs): """Add a Sorted Neighbourhood Index. Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`:: from recordlinkage.index import SortedNeighbourhood indexer = recordlinkage.Index() indexer.add(SortedNeighbourhood()) """ indexer = SortedNeighbourhood(*args, **kwargs) self.add(indexer) return self
python
def sortedneighbourhood(self, *args, **kwargs): """Add a Sorted Neighbourhood Index. Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`:: from recordlinkage.index import SortedNeighbourhood indexer = recordlinkage.Index() indexer.add(SortedNeighbourhood()) """ indexer = SortedNeighbourhood(*args, **kwargs) self.add(indexer) return self
[ "def", "sortedneighbourhood", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "indexer", "=", "SortedNeighbourhood", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "add", "(", "indexer", ")", "return", "self" ]
Add a Sorted Neighbourhood Index. Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`:: from recordlinkage.index import SortedNeighbourhood indexer = recordlinkage.Index() indexer.add(SortedNeighbourhood())
[ "Add", "a", "Sorted", "Neighbourhood", "Index", "." ]
87a5f4af904e0834047cd07ff1c70146b1e6d693
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/api.py#L58-L72
train
206,499