repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.document
def document(self, document_id=None): """Create a sub-document underneath the current collection. Args: document_id (Optional[str]): The document identifier within the current collection. If not provided, will default to a random 20 character string composed of digits, uppercase and lowercase and letters. Returns: ~.firestore_v1beta1.document.DocumentReference: The child document. """ if document_id is None: document_id = _auto_id() child_path = self._path + (document_id,) return self._client.document(*child_path)
python
def document(self, document_id=None):
    """Return a reference to a child document of this collection.

    Args:
        document_id (Optional[str]): The document identifier within the
            current collection. If not provided, will default to a random
            20 character string composed of digits, uppercase and
            lowercase letters.

    Returns:
        ~.firestore_v1beta1.document.DocumentReference:
            The child document.
    """
    # Fall back to a server-style auto ID when the caller did not choose one.
    child_id = _auto_id() if document_id is None else document_id
    return self._client.document(*(self._path + (child_id,)))
[ "def", "document", "(", "self", ",", "document_id", "=", "None", ")", ":", "if", "document_id", "is", "None", ":", "document_id", "=", "_auto_id", "(", ")", "child_path", "=", "self", ".", "_path", "+", "(", "document_id", ",", ")", "return", "self", ".", "_client", ".", "document", "(", "*", "child_path", ")" ]
Create a sub-document underneath the current collection. Args: document_id (Optional[str]): The document identifier within the current collection. If not provided, will default to a random 20 character string composed of digits, uppercase and lowercase and letters. Returns: ~.firestore_v1beta1.document.DocumentReference: The child document.
[ "Create", "a", "sub", "-", "document", "underneath", "the", "current", "collection", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L94-L111
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference._parent_info
def _parent_info(self): """Get fully-qualified parent path and prefix for this collection. Returns: Tuple[str, str]: Pair of * the fully-qualified (with database and project) path to the parent of this collection (will either be the database path or a document path). * the prefix to a document in this collection. """ parent_doc = self.parent if parent_doc is None: parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join( (self._client._database_string, "documents") ) else: parent_path = parent_doc._document_path expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id)) return parent_path, expected_prefix
python
def _parent_info(self):
    """Compute the parent path and document-name prefix for this collection.

    Returns:
        Tuple[str, str]: Pair of

        * the fully-qualified (with database and project) path to the
          parent of this collection (either the database path, for a
          top-level collection, or a document path).
        * the prefix to a document in this collection.
    """
    parent_doc = self.parent
    if parent_doc is not None:
        # Sub-collection: the parent is a document.
        parent_path = parent_doc._document_path
    else:
        # Top-level collection: the parent is the database's root
        # ``.../documents`` resource.
        parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join(
            (self._client._database_string, "documents")
        )
    expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join(
        (parent_path, self.id)
    )
    return parent_path, expected_prefix
[ "def", "_parent_info", "(", "self", ")", ":", "parent_doc", "=", "self", ".", "parent", "if", "parent_doc", "is", "None", ":", "parent_path", "=", "_helpers", ".", "DOCUMENT_PATH_DELIMITER", ".", "join", "(", "(", "self", ".", "_client", ".", "_database_string", ",", "\"documents\"", ")", ")", "else", ":", "parent_path", "=", "parent_doc", ".", "_document_path", "expected_prefix", "=", "_helpers", ".", "DOCUMENT_PATH_DELIMITER", ".", "join", "(", "(", "parent_path", ",", "self", ".", "id", ")", ")", "return", "parent_path", ",", "expected_prefix" ]
Get fully-qualified parent path and prefix for this collection. Returns: Tuple[str, str]: Pair of * the fully-qualified (with database and project) path to the parent of this collection (will either be the database path or a document path). * the prefix to a document in this collection.
[ "Get", "fully", "-", "qualified", "parent", "path", "and", "prefix", "for", "this", "collection", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L113-L133
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.add
def add(self, document_data, document_id=None): """Create a document in the Firestore database with the provided data. Args: document_data (dict): Property names and values to use for creating the document. document_id (Optional[str]): The document identifier within the current collection. If not provided, an ID will be automatically assigned by the server (the assigned ID will be a random 20 character string composed of digits, uppercase and lowercase letters). Returns: Tuple[google.protobuf.timestamp_pb2.Timestamp, \ ~.firestore_v1beta1.document.DocumentReference]: Pair of * The ``update_time`` when the document was created (or overwritten). * A document reference for the created document. Raises: ~google.cloud.exceptions.Conflict: If ``document_id`` is provided and the document already exists. """ if document_id is None: parent_path, expected_prefix = self._parent_info() document_pb = document_pb2.Document() created_document_pb = self._client._firestore_api.create_document( parent_path, collection_id=self.id, document_id=None, document=document_pb, mask=None, metadata=self._client._rpc_metadata, ) new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix) document_ref = self.document(new_document_id) set_result = document_ref.set(document_data) return set_result.update_time, document_ref else: document_ref = self.document(document_id) write_result = document_ref.create(document_data) return write_result.update_time, document_ref
python
def add(self, document_data, document_id=None):
    """Create a document in the Firestore database with the provided data.

    Args:
        document_data (dict): Property names and values to use for
            creating the document.
        document_id (Optional[str]): The document identifier within the
            current collection. If not provided, an ID will be
            automatically assigned by the server (the assigned ID will be
            a random 20 character string composed of digits, uppercase
            and lowercase letters).

    Returns:
        Tuple[google.protobuf.timestamp_pb2.Timestamp, \
            ~.firestore_v1beta1.document.DocumentReference]: Pair of

            * The ``update_time`` when the document was created (or
              overwritten).
            * A document reference for the created document.

    Raises:
        ~google.cloud.exceptions.Conflict: If ``document_id`` is provided
            and the document already exists.
    """
    if document_id is not None:
        # Caller chose the ID: create directly, which raises Conflict
        # when a document with that ID already exists.
        document_ref = self.document(document_id)
        write_result = document_ref.create(document_data)
        return write_result.update_time, document_ref

    # No ID supplied: have the server mint one by creating an empty
    # document, then write the payload into it with ``set``.
    parent_path, expected_prefix = self._parent_info()
    document_pb = document_pb2.Document()

    created_document_pb = self._client._firestore_api.create_document(
        parent_path,
        collection_id=self.id,
        document_id=None,
        document=document_pb,
        mask=None,
        metadata=self._client._rpc_metadata,
    )

    # Recover the server-assigned ID from the created document's name.
    new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)
    document_ref = self.document(new_document_id)
    set_result = document_ref.set(document_data)
    return set_result.update_time, document_ref
[ "def", "add", "(", "self", ",", "document_data", ",", "document_id", "=", "None", ")", ":", "if", "document_id", "is", "None", ":", "parent_path", ",", "expected_prefix", "=", "self", ".", "_parent_info", "(", ")", "document_pb", "=", "document_pb2", ".", "Document", "(", ")", "created_document_pb", "=", "self", ".", "_client", ".", "_firestore_api", ".", "create_document", "(", "parent_path", ",", "collection_id", "=", "self", ".", "id", ",", "document_id", "=", "None", ",", "document", "=", "document_pb", ",", "mask", "=", "None", ",", "metadata", "=", "self", ".", "_client", ".", "_rpc_metadata", ",", ")", "new_document_id", "=", "_helpers", ".", "get_doc_id", "(", "created_document_pb", ",", "expected_prefix", ")", "document_ref", "=", "self", ".", "document", "(", "new_document_id", ")", "set_result", "=", "document_ref", ".", "set", "(", "document_data", ")", "return", "set_result", ".", "update_time", ",", "document_ref", "else", ":", "document_ref", "=", "self", ".", "document", "(", "document_id", ")", "write_result", "=", "document_ref", ".", "create", "(", "document_data", ")", "return", "write_result", ".", "update_time", ",", "document_ref" ]
Create a document in the Firestore database with the provided data. Args: document_data (dict): Property names and values to use for creating the document. document_id (Optional[str]): The document identifier within the current collection. If not provided, an ID will be automatically assigned by the server (the assigned ID will be a random 20 character string composed of digits, uppercase and lowercase letters). Returns: Tuple[google.protobuf.timestamp_pb2.Timestamp, \ ~.firestore_v1beta1.document.DocumentReference]: Pair of * The ``update_time`` when the document was created (or overwritten). * A document reference for the created document. Raises: ~google.cloud.exceptions.Conflict: If ``document_id`` is provided and the document already exists.
[ "Create", "a", "document", "in", "the", "Firestore", "database", "with", "the", "provided", "data", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L135-L180
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.list_documents
def list_documents(self, page_size=None): """List all subdocuments of the current collection. Args: page_size (Optional[int]]): The maximum number of documents in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. Returns: Sequence[~.firestore_v1beta1.collection.DocumentReference]: iterator of subdocuments of the current collection. If the collection does not exist at the time of `snapshot`, the iterator will be empty """ parent, _ = self._parent_info() iterator = self._client._firestore_api.list_documents( parent, self.id, page_size=page_size, show_missing=True, metadata=self._client._rpc_metadata, ) iterator.collection = self iterator.item_to_value = _item_to_document_ref return iterator
python
def list_documents(self, page_size=None):
    """Iterate over the document references contained in this collection.

    Args:
        page_size (Optional[int]]): The maximum number of documents
            in each page of results from this request. Non-positive
            values are ignored. Defaults to a sensible value set by
            the API.

    Returns:
        Sequence[~.firestore_v1beta1.collection.DocumentReference]:
            iterator of subdocuments of the current collection. If the
            collection does not exist at the time of `snapshot`, the
            iterator will be empty
    """
    parent, _ = self._parent_info()

    page_iter = self._client._firestore_api.list_documents(
        parent,
        self.id,
        page_size=page_size,
        show_missing=True,
        metadata=self._client._rpc_metadata,
    )
    # Attach the collection and a converter so the iterator yields
    # DocumentReference objects rather than raw protobuf documents.
    page_iter.collection = self
    page_iter.item_to_value = _item_to_document_ref
    return page_iter
[ "def", "list_documents", "(", "self", ",", "page_size", "=", "None", ")", ":", "parent", ",", "_", "=", "self", ".", "_parent_info", "(", ")", "iterator", "=", "self", ".", "_client", ".", "_firestore_api", ".", "list_documents", "(", "parent", ",", "self", ".", "id", ",", "page_size", "=", "page_size", ",", "show_missing", "=", "True", ",", "metadata", "=", "self", ".", "_client", ".", "_rpc_metadata", ",", ")", "iterator", ".", "collection", "=", "self", "iterator", ".", "item_to_value", "=", "_item_to_document_ref", "return", "iterator" ]
List all subdocuments of the current collection. Args: page_size (Optional[int]]): The maximum number of documents in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. Returns: Sequence[~.firestore_v1beta1.collection.DocumentReference]: iterator of subdocuments of the current collection. If the collection does not exist at the time of `snapshot`, the iterator will be empty
[ "List", "all", "subdocuments", "of", "the", "current", "collection", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L182-L207
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.select
def select(self, field_paths): """Create a "select" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.select` for more information on this method. Args: field_paths (Iterable[str, ...]): An iterable of field paths (``.``-delimited list of field names) to use as a projection of document fields in the query results. Returns: ~.firestore_v1beta1.query.Query: A "projected" query. """ query = query_mod.Query(self) return query.select(field_paths)
python
def select(self, field_paths):
    """Create a "select" (projection) query rooted at this collection.

    See :meth:`~.firestore_v1beta1.query.Query.select` for more
    information on this method.

    Args:
        field_paths (Iterable[str, ...]): An iterable of field paths
            (``.``-delimited list of field names) to use as a projection
            of document fields in the query results.

    Returns:
        ~.firestore_v1beta1.query.Query: A "projected" query.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).select(field_paths)
[ "def", "select", "(", "self", ",", "field_paths", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "select", "(", "field_paths", ")" ]
Create a "select" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.select` for more information on this method. Args: field_paths (Iterable[str, ...]): An iterable of field paths (``.``-delimited list of field names) to use as a projection of document fields in the query results. Returns: ~.firestore_v1beta1.query.Query: A "projected" query.
[ "Create", "a", "select", "query", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L209-L225
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.where
def where(self, field_path, op_string, value): """Create a "where" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.where` for more information on this method. Args: field_path (str): A field path (``.``-delimited list of field names) for the field to filter on. op_string (str): A comparison operation in the form of a string. Acceptable values are ``<``, ``<=``, ``==``, ``>=`` and ``>``. value (Any): The value to compare the field against in the filter. If ``value`` is :data:`None` or a NaN, then ``==`` is the only allowed operation. Returns: ~.firestore_v1beta1.query.Query: A filtered query. """ query = query_mod.Query(self) return query.where(field_path, op_string, value)
python
def where(self, field_path, op_string, value):
    """Create a filtered ("where") query rooted at this collection.

    See :meth:`~.firestore_v1beta1.query.Query.where` for more
    information on this method.

    Args:
        field_path (str): A field path (``.``-delimited list of field
            names) for the field to filter on.
        op_string (str): A comparison operation in the form of a string.
            Acceptable values are ``<``, ``<=``, ``==``, ``>=`` and ``>``.
        value (Any): The value to compare the field against in the
            filter. If ``value`` is :data:`None` or a NaN, then ``==``
            is the only allowed operation.

    Returns:
        ~.firestore_v1beta1.query.Query: A filtered query.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).where(field_path, op_string, value)
[ "def", "where", "(", "self", ",", "field_path", ",", "op_string", ",", "value", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "where", "(", "field_path", ",", "op_string", ",", "value", ")" ]
Create a "where" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.where` for more information on this method. Args: field_path (str): A field path (``.``-delimited list of field names) for the field to filter on. op_string (str): A comparison operation in the form of a string. Acceptable values are ``<``, ``<=``, ``==``, ``>=`` and ``>``. value (Any): The value to compare the field against in the filter. If ``value`` is :data:`None` or a NaN, then ``==`` is the only allowed operation. Returns: ~.firestore_v1beta1.query.Query: A filtered query.
[ "Create", "a", "where", "query", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L227-L248
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.order_by
def order_by(self, field_path, **kwargs): """Create an "order by" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.order_by` for more information on this method. Args: field_path (str): A field path (``.``-delimited list of field names) on which to order the query results. kwargs (Dict[str, Any]): The keyword arguments to pass along to the query. The only supported keyword is ``direction``, see :meth:`~.firestore_v1beta1.query.Query.order_by` for more information. Returns: ~.firestore_v1beta1.query.Query: An "order by" query. """ query = query_mod.Query(self) return query.order_by(field_path, **kwargs)
python
def order_by(self, field_path, **kwargs):
    """Create an ordered query rooted at this collection.

    See :meth:`~.firestore_v1beta1.query.Query.order_by` for more
    information on this method.

    Args:
        field_path (str): A field path (``.``-delimited list of field
            names) on which to order the query results.
        kwargs (Dict[str, Any]): The keyword arguments to pass along to
            the query. The only supported keyword is ``direction``, see
            :meth:`~.firestore_v1beta1.query.Query.order_by` for more
            information.

    Returns:
        ~.firestore_v1beta1.query.Query: An "order by" query.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).order_by(field_path, **kwargs)
[ "def", "order_by", "(", "self", ",", "field_path", ",", "*", "*", "kwargs", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "order_by", "(", "field_path", ",", "*", "*", "kwargs", ")" ]
Create an "order by" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.order_by` for more information on this method. Args: field_path (str): A field path (``.``-delimited list of field names) on which to order the query results. kwargs (Dict[str, Any]): The keyword arguments to pass along to the query. The only supported keyword is ``direction``, see :meth:`~.firestore_v1beta1.query.Query.order_by` for more information. Returns: ~.firestore_v1beta1.query.Query: An "order by" query.
[ "Create", "an", "order", "by", "query", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L250-L269
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.limit
def limit(self, count): """Create a limited query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.limit` for more information on this method. Args: count (int): Maximum number of documents to return that match the query. Returns: ~.firestore_v1beta1.query.Query: A limited query. """ query = query_mod.Query(self) return query.limit(count)
python
def limit(self, count):
    """Create a size-limited query rooted at this collection.

    See :meth:`~.firestore_v1beta1.query.Query.limit` for more
    information on this method.

    Args:
        count (int): Maximum number of documents to return that match
            the query.

    Returns:
        ~.firestore_v1beta1.query.Query: A limited query.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).limit(count)
[ "def", "limit", "(", "self", ",", "count", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "limit", "(", "count", ")" ]
Create a limited query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.limit` for more information on this method. Args: count (int): Maximum number of documents to return that match the query. Returns: ~.firestore_v1beta1.query.Query: A limited query.
[ "Create", "a", "limited", "query", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L271-L286
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.offset
def offset(self, num_to_skip): """Skip to an offset in a query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.offset` for more information on this method. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query. """ query = query_mod.Query(self) return query.offset(num_to_skip)
python
def offset(self, num_to_skip):
    """Create an offset query rooted at this collection.

    See :meth:`~.firestore_v1beta1.query.Query.offset` for more
    information on this method.

    Args:
        num_to_skip (int): The number of results to skip at the beginning
            of query results. (Must be non-negative.)

    Returns:
        ~.firestore_v1beta1.query.Query: An offset query.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).offset(num_to_skip)
[ "def", "offset", "(", "self", ",", "num_to_skip", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "offset", "(", "num_to_skip", ")" ]
Skip to an offset in a query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.offset` for more information on this method. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query.
[ "Skip", "to", "an", "offset", "in", "a", "query", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L288-L303
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.start_at
def start_at(self, document_fields): """Start query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.start_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. """ query = query_mod.Query(self) return query.start_at(document_fields)
python
def start_at(self, document_fields):
    """Create a query rooted at this collection that starts at a cursor.

    See :meth:`~.firestore_v1beta1.query.Query.start_at` for more
    information on this method.

    Args:
        document_fields (Union[~.firestore_v1beta1.\
            document.DocumentSnapshot, dict, list, tuple]): a document
            snapshot or a dictionary/list/tuple of fields representing a
            query results cursor. A cursor is a collection of values that
            represent a position in a query result set.

    Returns:
        ~.firestore_v1beta1.query.Query: A query with cursor.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).start_at(document_fields)
[ "def", "start_at", "(", "self", ",", "document_fields", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "start_at", "(", "document_fields", ")" ]
Start query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.start_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor.
[ "Start", "query", "at", "a", "cursor", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L305-L323
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.start_after
def start_after(self, document_fields): """Start query after a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.start_after` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. """ query = query_mod.Query(self) return query.start_after(document_fields)
python
def start_after(self, document_fields):
    """Create a query rooted at this collection that starts after a cursor.

    See :meth:`~.firestore_v1beta1.query.Query.start_after` for more
    information on this method.

    Args:
        document_fields (Union[~.firestore_v1beta1.\
            document.DocumentSnapshot, dict, list, tuple]): a document
            snapshot or a dictionary/list/tuple of fields representing a
            query results cursor. A cursor is a collection of values that
            represent a position in a query result set.

    Returns:
        ~.firestore_v1beta1.query.Query: A query with cursor.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).start_after(document_fields)
[ "def", "start_after", "(", "self", ",", "document_fields", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "start_after", "(", "document_fields", ")" ]
Start query after a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.start_after` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor.
[ "Start", "query", "after", "a", "cursor", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L325-L343
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.end_before
def end_before(self, document_fields): """End query before a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.end_before` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. """ query = query_mod.Query(self) return query.end_before(document_fields)
python
def end_before(self, document_fields):
    """Create a query rooted at this collection that ends before a cursor.

    See :meth:`~.firestore_v1beta1.query.Query.end_before` for more
    information on this method.

    Args:
        document_fields (Union[~.firestore_v1beta1.\
            document.DocumentSnapshot, dict, list, tuple]): a document
            snapshot or a dictionary/list/tuple of fields representing a
            query results cursor. A cursor is a collection of values that
            represent a position in a query result set.

    Returns:
        ~.firestore_v1beta1.query.Query: A query with cursor.
    """
    # Delegate to a fresh Query over this collection.
    return query_mod.Query(self).end_before(document_fields)
[ "def", "end_before", "(", "self", ",", "document_fields", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "end_before", "(", "document_fields", ")" ]
End query before a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.end_before` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor.
[ "End", "query", "before", "a", "cursor", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L345-L363
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.end_at
def end_at(self, document_fields): """End query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.end_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. """ query = query_mod.Query(self) return query.end_at(document_fields)
python
def end_at(self, document_fields): """End query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.end_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. """ query = query_mod.Query(self) return query.end_at(document_fields)
[ "def", "end_at", "(", "self", ",", "document_fields", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "end_at", "(", "document_fields", ")" ]
End query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.end_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor.
[ "End", "query", "at", "a", "cursor", "with", "this", "collection", "as", "parent", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L365-L383
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.stream
def stream(self, transaction=None): """Read the documents in this collection. This sends a ``RunQuery`` RPC and then returns an iterator which consumes each document returned in the stream of ``RunQueryResponse`` messages. .. note:: The underlying stream of responses will time out after the ``max_rpc_timeout_millis`` value set in the GAPIC client configuration for the ``RunQuery`` API. Snapshots not consumed from the iterator before that point will be lost. If a ``transaction`` is used and it already has write operations added, this method cannot be used (i.e. read-after-write is not allowed). Args: transaction (Optional[~.firestore_v1beta1.transaction.\ Transaction]): An existing transaction that the query will run in. Yields: ~.firestore_v1beta1.document.DocumentSnapshot: The next document that fulfills the query. """ query = query_mod.Query(self) return query.stream(transaction=transaction)
python
def stream(self, transaction=None): """Read the documents in this collection. This sends a ``RunQuery`` RPC and then returns an iterator which consumes each document returned in the stream of ``RunQueryResponse`` messages. .. note:: The underlying stream of responses will time out after the ``max_rpc_timeout_millis`` value set in the GAPIC client configuration for the ``RunQuery`` API. Snapshots not consumed from the iterator before that point will be lost. If a ``transaction`` is used and it already has write operations added, this method cannot be used (i.e. read-after-write is not allowed). Args: transaction (Optional[~.firestore_v1beta1.transaction.\ Transaction]): An existing transaction that the query will run in. Yields: ~.firestore_v1beta1.document.DocumentSnapshot: The next document that fulfills the query. """ query = query_mod.Query(self) return query.stream(transaction=transaction)
[ "def", "stream", "(", "self", ",", "transaction", "=", "None", ")", ":", "query", "=", "query_mod", ".", "Query", "(", "self", ")", "return", "query", ".", "stream", "(", "transaction", "=", "transaction", ")" ]
Read the documents in this collection. This sends a ``RunQuery`` RPC and then returns an iterator which consumes each document returned in the stream of ``RunQueryResponse`` messages. .. note:: The underlying stream of responses will time out after the ``max_rpc_timeout_millis`` value set in the GAPIC client configuration for the ``RunQuery`` API. Snapshots not consumed from the iterator before that point will be lost. If a ``transaction`` is used and it already has write operations added, this method cannot be used (i.e. read-after-write is not allowed). Args: transaction (Optional[~.firestore_v1beta1.transaction.\ Transaction]): An existing transaction that the query will run in. Yields: ~.firestore_v1beta1.document.DocumentSnapshot: The next document that fulfills the query.
[ "Read", "the", "documents", "in", "this", "collection", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L394-L422
train
googleapis/google-cloud-python
translate/google/cloud/translate_v2/client.py
Client.get_languages
def get_languages(self, target_language=None): """Get list of supported languages for translation. Response See https://cloud.google.com/translate/docs/discovering-supported-languages :type target_language: str :param target_language: (Optional) The language used to localize returned language names. Defaults to the target language on the current client. :rtype: list :returns: List of dictionaries. Each dictionary contains a supported ISO 639-1 language code (using the dictionary key ``language``). If ``target_language`` is passed, each dictionary will also contain the name of each supported language (localized to the target language). """ query_params = {} if target_language is None: target_language = self.target_language if target_language is not None: query_params["target"] = target_language response = self._connection.api_request( method="GET", path="/languages", query_params=query_params ) return response.get("data", {}).get("languages", ())
python
def get_languages(self, target_language=None): """Get list of supported languages for translation. Response See https://cloud.google.com/translate/docs/discovering-supported-languages :type target_language: str :param target_language: (Optional) The language used to localize returned language names. Defaults to the target language on the current client. :rtype: list :returns: List of dictionaries. Each dictionary contains a supported ISO 639-1 language code (using the dictionary key ``language``). If ``target_language`` is passed, each dictionary will also contain the name of each supported language (localized to the target language). """ query_params = {} if target_language is None: target_language = self.target_language if target_language is not None: query_params["target"] = target_language response = self._connection.api_request( method="GET", path="/languages", query_params=query_params ) return response.get("data", {}).get("languages", ())
[ "def", "get_languages", "(", "self", ",", "target_language", "=", "None", ")", ":", "query_params", "=", "{", "}", "if", "target_language", "is", "None", ":", "target_language", "=", "self", ".", "target_language", "if", "target_language", "is", "not", "None", ":", "query_params", "[", "\"target\"", "]", "=", "target_language", "response", "=", "self", ".", "_connection", ".", "api_request", "(", "method", "=", "\"GET\"", ",", "path", "=", "\"/languages\"", ",", "query_params", "=", "query_params", ")", "return", "response", ".", "get", "(", "\"data\"", ",", "{", "}", ")", ".", "get", "(", "\"languages\"", ",", "(", ")", ")" ]
Get list of supported languages for translation. Response See https://cloud.google.com/translate/docs/discovering-supported-languages :type target_language: str :param target_language: (Optional) The language used to localize returned language names. Defaults to the target language on the current client. :rtype: list :returns: List of dictionaries. Each dictionary contains a supported ISO 639-1 language code (using the dictionary key ``language``). If ``target_language`` is passed, each dictionary will also contain the name of each supported language (localized to the target language).
[ "Get", "list", "of", "supported", "languages", "for", "translation", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/translate/google/cloud/translate_v2/client.py#L67-L95
train
googleapis/google-cloud-python
translate/google/cloud/translate_v2/client.py
Client.detect_language
def detect_language(self, values): """Detect the language of a string or list of strings. See https://cloud.google.com/translate/docs/detecting-language :type values: str or list :param values: String or list of strings that will have language detected. :rtype: dict or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys * ``confidence``: The confidence in language detection, a float between 0 and 1. * ``input``: The corresponding input value. * ``language``: The detected language (as an ISO 639-1 language code). though the key ``confidence`` may not always be present. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`ValueError <exceptions.ValueError>` if the number of detections is not equal to the number of values. :class:`ValueError <exceptions.ValueError>` if a value produces a list of detections with 0 or multiple results in it. """ single_value = False if isinstance(values, six.string_types): single_value = True values = [values] data = {"q": values} response = self._connection.api_request( method="POST", path="/detect", data=data ) detections = response.get("data", {}).get("detections", ()) if len(values) != len(detections): raise ValueError( "Expected same number of values and detections", values, detections ) for index, value in enumerate(values): # Empirically, even clearly ambiguous text like "no" only returns # a single detection, so we replace the list of detections with # the single detection contained. if len(detections[index]) == 1: detections[index] = detections[index][0] else: message = ( "Expected a single detection per value, API " "returned %d" ) % (len(detections[index]),) raise ValueError(message, value, detections[index]) detections[index]["input"] = value # The ``isReliable`` field is deprecated. detections[index].pop("isReliable", None) if single_value: return detections[0] else: return detections
python
def detect_language(self, values): """Detect the language of a string or list of strings. See https://cloud.google.com/translate/docs/detecting-language :type values: str or list :param values: String or list of strings that will have language detected. :rtype: dict or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys * ``confidence``: The confidence in language detection, a float between 0 and 1. * ``input``: The corresponding input value. * ``language``: The detected language (as an ISO 639-1 language code). though the key ``confidence`` may not always be present. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`ValueError <exceptions.ValueError>` if the number of detections is not equal to the number of values. :class:`ValueError <exceptions.ValueError>` if a value produces a list of detections with 0 or multiple results in it. """ single_value = False if isinstance(values, six.string_types): single_value = True values = [values] data = {"q": values} response = self._connection.api_request( method="POST", path="/detect", data=data ) detections = response.get("data", {}).get("detections", ()) if len(values) != len(detections): raise ValueError( "Expected same number of values and detections", values, detections ) for index, value in enumerate(values): # Empirically, even clearly ambiguous text like "no" only returns # a single detection, so we replace the list of detections with # the single detection contained. if len(detections[index]) == 1: detections[index] = detections[index][0] else: message = ( "Expected a single detection per value, API " "returned %d" ) % (len(detections[index]),) raise ValueError(message, value, detections[index]) detections[index]["input"] = value # The ``isReliable`` field is deprecated. detections[index].pop("isReliable", None) if single_value: return detections[0] else: return detections
[ "def", "detect_language", "(", "self", ",", "values", ")", ":", "single_value", "=", "False", "if", "isinstance", "(", "values", ",", "six", ".", "string_types", ")", ":", "single_value", "=", "True", "values", "=", "[", "values", "]", "data", "=", "{", "\"q\"", ":", "values", "}", "response", "=", "self", ".", "_connection", ".", "api_request", "(", "method", "=", "\"POST\"", ",", "path", "=", "\"/detect\"", ",", "data", "=", "data", ")", "detections", "=", "response", ".", "get", "(", "\"data\"", ",", "{", "}", ")", ".", "get", "(", "\"detections\"", ",", "(", ")", ")", "if", "len", "(", "values", ")", "!=", "len", "(", "detections", ")", ":", "raise", "ValueError", "(", "\"Expected same number of values and detections\"", ",", "values", ",", "detections", ")", "for", "index", ",", "value", "in", "enumerate", "(", "values", ")", ":", "# Empirically, even clearly ambiguous text like \"no\" only returns", "# a single detection, so we replace the list of detections with", "# the single detection contained.", "if", "len", "(", "detections", "[", "index", "]", ")", "==", "1", ":", "detections", "[", "index", "]", "=", "detections", "[", "index", "]", "[", "0", "]", "else", ":", "message", "=", "(", "\"Expected a single detection per value, API \"", "\"returned %d\"", ")", "%", "(", "len", "(", "detections", "[", "index", "]", ")", ",", ")", "raise", "ValueError", "(", "message", ",", "value", ",", "detections", "[", "index", "]", ")", "detections", "[", "index", "]", "[", "\"input\"", "]", "=", "value", "# The ``isReliable`` field is deprecated.", "detections", "[", "index", "]", ".", "pop", "(", "\"isReliable\"", ",", "None", ")", "if", "single_value", ":", "return", "detections", "[", "0", "]", "else", ":", "return", "detections" ]
Detect the language of a string or list of strings. See https://cloud.google.com/translate/docs/detecting-language :type values: str or list :param values: String or list of strings that will have language detected. :rtype: dict or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys * ``confidence``: The confidence in language detection, a float between 0 and 1. * ``input``: The corresponding input value. * ``language``: The detected language (as an ISO 639-1 language code). though the key ``confidence`` may not always be present. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`ValueError <exceptions.ValueError>` if the number of detections is not equal to the number of values. :class:`ValueError <exceptions.ValueError>` if a value produces a list of detections with 0 or multiple results in it.
[ "Detect", "the", "language", "of", "a", "string", "or", "list", "of", "strings", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/translate/google/cloud/translate_v2/client.py#L97-L163
train
googleapis/google-cloud-python
translate/google/cloud/translate_v2/client.py
Client.translate
def translate( self, values, target_language=None, format_=None, source_language=None, customization_ids=(), model=None, ): """Translate a string or list of strings. See https://cloud.google.com/translate/docs/translating-text :type values: str or list :param values: String or list of strings to translate. :type target_language: str :param target_language: The language to translate results into. This is required by the API and defaults to the target language of the current instance. :type format_: str :param format_: (Optional) One of ``text`` or ``html``, to specify if the input text is plain text or HTML. :type source_language: str :param source_language: (Optional) The language of the text to be translated. :type customization_ids: str or list :param customization_ids: (Optional) ID or list of customization IDs for translation. Sets the ``cid`` parameter in the query. :type model: str :param model: (Optional) The model used to translate the text, such as ``'base'`` or ``'nmt'``. :rtype: str or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys (though not all will be present in all cases) * ``detectedSourceLanguage``: The detected language (as an ISO 639-1 language code) of the text. * ``translatedText``: The translation of the text into the target language. * ``input``: The corresponding input value. * ``model``: The model used to translate the text. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`~exceptions.ValueError` if the number of values and translations differ. 
""" single_value = False if isinstance(values, six.string_types): single_value = True values = [values] if target_language is None: target_language = self.target_language if isinstance(customization_ids, six.string_types): customization_ids = [customization_ids] data = { "target": target_language, "q": values, "cid": customization_ids, "format": format_, "source": source_language, "model": model, } response = self._connection.api_request(method="POST", path="", data=data) translations = response.get("data", {}).get("translations", ()) if len(values) != len(translations): raise ValueError( "Expected iterations to have same length", values, translations ) for value, translation in six.moves.zip(values, translations): translation["input"] = value if single_value: return translations[0] else: return translations
python
def translate( self, values, target_language=None, format_=None, source_language=None, customization_ids=(), model=None, ): """Translate a string or list of strings. See https://cloud.google.com/translate/docs/translating-text :type values: str or list :param values: String or list of strings to translate. :type target_language: str :param target_language: The language to translate results into. This is required by the API and defaults to the target language of the current instance. :type format_: str :param format_: (Optional) One of ``text`` or ``html``, to specify if the input text is plain text or HTML. :type source_language: str :param source_language: (Optional) The language of the text to be translated. :type customization_ids: str or list :param customization_ids: (Optional) ID or list of customization IDs for translation. Sets the ``cid`` parameter in the query. :type model: str :param model: (Optional) The model used to translate the text, such as ``'base'`` or ``'nmt'``. :rtype: str or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys (though not all will be present in all cases) * ``detectedSourceLanguage``: The detected language (as an ISO 639-1 language code) of the text. * ``translatedText``: The translation of the text into the target language. * ``input``: The corresponding input value. * ``model``: The model used to translate the text. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`~exceptions.ValueError` if the number of values and translations differ. 
""" single_value = False if isinstance(values, six.string_types): single_value = True values = [values] if target_language is None: target_language = self.target_language if isinstance(customization_ids, six.string_types): customization_ids = [customization_ids] data = { "target": target_language, "q": values, "cid": customization_ids, "format": format_, "source": source_language, "model": model, } response = self._connection.api_request(method="POST", path="", data=data) translations = response.get("data", {}).get("translations", ()) if len(values) != len(translations): raise ValueError( "Expected iterations to have same length", values, translations ) for value, translation in six.moves.zip(values, translations): translation["input"] = value if single_value: return translations[0] else: return translations
[ "def", "translate", "(", "self", ",", "values", ",", "target_language", "=", "None", ",", "format_", "=", "None", ",", "source_language", "=", "None", ",", "customization_ids", "=", "(", ")", ",", "model", "=", "None", ",", ")", ":", "single_value", "=", "False", "if", "isinstance", "(", "values", ",", "six", ".", "string_types", ")", ":", "single_value", "=", "True", "values", "=", "[", "values", "]", "if", "target_language", "is", "None", ":", "target_language", "=", "self", ".", "target_language", "if", "isinstance", "(", "customization_ids", ",", "six", ".", "string_types", ")", ":", "customization_ids", "=", "[", "customization_ids", "]", "data", "=", "{", "\"target\"", ":", "target_language", ",", "\"q\"", ":", "values", ",", "\"cid\"", ":", "customization_ids", ",", "\"format\"", ":", "format_", ",", "\"source\"", ":", "source_language", ",", "\"model\"", ":", "model", ",", "}", "response", "=", "self", ".", "_connection", ".", "api_request", "(", "method", "=", "\"POST\"", ",", "path", "=", "\"\"", ",", "data", "=", "data", ")", "translations", "=", "response", ".", "get", "(", "\"data\"", ",", "{", "}", ")", ".", "get", "(", "\"translations\"", ",", "(", ")", ")", "if", "len", "(", "values", ")", "!=", "len", "(", "translations", ")", ":", "raise", "ValueError", "(", "\"Expected iterations to have same length\"", ",", "values", ",", "translations", ")", "for", "value", ",", "translation", "in", "six", ".", "moves", ".", "zip", "(", "values", ",", "translations", ")", ":", "translation", "[", "\"input\"", "]", "=", "value", "if", "single_value", ":", "return", "translations", "[", "0", "]", "else", ":", "return", "translations" ]
Translate a string or list of strings. See https://cloud.google.com/translate/docs/translating-text :type values: str or list :param values: String or list of strings to translate. :type target_language: str :param target_language: The language to translate results into. This is required by the API and defaults to the target language of the current instance. :type format_: str :param format_: (Optional) One of ``text`` or ``html``, to specify if the input text is plain text or HTML. :type source_language: str :param source_language: (Optional) The language of the text to be translated. :type customization_ids: str or list :param customization_ids: (Optional) ID or list of customization IDs for translation. Sets the ``cid`` parameter in the query. :type model: str :param model: (Optional) The model used to translate the text, such as ``'base'`` or ``'nmt'``. :rtype: str or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys (though not all will be present in all cases) * ``detectedSourceLanguage``: The detected language (as an ISO 639-1 language code) of the text. * ``translatedText``: The translation of the text into the target language. * ``input``: The corresponding input value. * ``model``: The model used to translate the text. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`~exceptions.ValueError` if the number of values and translations differ.
[ "Translate", "a", "string", "or", "list", "of", "strings", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/translate/google/cloud/translate_v2/client.py#L165-L252
train
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
Leaser.add
def add(self, items): """Add messages to be managed by the leaser.""" for item in items: # Add the ack ID to the set of managed ack IDs, and increment # the size counter. if item.ack_id not in self._leased_messages: self._leased_messages[item.ack_id] = _LeasedMessage( added_time=time.time(), size=item.byte_size ) self._bytes += item.byte_size else: _LOGGER.debug("Message %s is already lease managed", item.ack_id)
python
def add(self, items): """Add messages to be managed by the leaser.""" for item in items: # Add the ack ID to the set of managed ack IDs, and increment # the size counter. if item.ack_id not in self._leased_messages: self._leased_messages[item.ack_id] = _LeasedMessage( added_time=time.time(), size=item.byte_size ) self._bytes += item.byte_size else: _LOGGER.debug("Message %s is already lease managed", item.ack_id)
[ "def", "add", "(", "self", ",", "items", ")", ":", "for", "item", "in", "items", ":", "# Add the ack ID to the set of managed ack IDs, and increment", "# the size counter.", "if", "item", ".", "ack_id", "not", "in", "self", ".", "_leased_messages", ":", "self", ".", "_leased_messages", "[", "item", ".", "ack_id", "]", "=", "_LeasedMessage", "(", "added_time", "=", "time", ".", "time", "(", ")", ",", "size", "=", "item", ".", "byte_size", ")", "self", ".", "_bytes", "+=", "item", ".", "byte_size", "else", ":", "_LOGGER", ".", "debug", "(", "\"Message %s is already lease managed\"", ",", "item", ".", "ack_id", ")" ]
Add messages to be managed by the leaser.
[ "Add", "messages", "to", "be", "managed", "by", "the", "leaser", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py#L65-L76
train
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
Leaser.remove
def remove(self, items): """Remove messages from lease management.""" # Remove the ack ID from lease management, and decrement the # byte counter. for item in items: if self._leased_messages.pop(item.ack_id, None) is not None: self._bytes -= item.byte_size else: _LOGGER.debug("Item %s was not managed.", item.ack_id) if self._bytes < 0: _LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes) self._bytes = 0
python
def remove(self, items): """Remove messages from lease management.""" # Remove the ack ID from lease management, and decrement the # byte counter. for item in items: if self._leased_messages.pop(item.ack_id, None) is not None: self._bytes -= item.byte_size else: _LOGGER.debug("Item %s was not managed.", item.ack_id) if self._bytes < 0: _LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes) self._bytes = 0
[ "def", "remove", "(", "self", ",", "items", ")", ":", "# Remove the ack ID from lease management, and decrement the", "# byte counter.", "for", "item", "in", "items", ":", "if", "self", ".", "_leased_messages", ".", "pop", "(", "item", ".", "ack_id", ",", "None", ")", "is", "not", "None", ":", "self", ".", "_bytes", "-=", "item", ".", "byte_size", "else", ":", "_LOGGER", ".", "debug", "(", "\"Item %s was not managed.\"", ",", "item", ".", "ack_id", ")", "if", "self", ".", "_bytes", "<", "0", ":", "_LOGGER", ".", "debug", "(", "\"Bytes was unexpectedly negative: %d\"", ",", "self", ".", "_bytes", ")", "self", ".", "_bytes", "=", "0" ]
Remove messages from lease management.
[ "Remove", "messages", "from", "lease", "management", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py#L78-L90
train
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
Leaser.maintain_leases
def maintain_leases(self): """Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats. """ while self._manager.is_active and not self._stop_event.is_set(): # Determine the appropriate duration for the lease. This is # based off of how long previous messages have taken to ack, with # a sensible default and within the ranges allowed by Pub/Sub. p99 = self._manager.ack_histogram.percentile(99) _LOGGER.debug("The current p99 value is %d seconds.", p99) # Make a copy of the leased messages. This is needed because it's # possible for another thread to modify the dictionary while # we're iterating over it. leased_messages = copy.copy(self._leased_messages) # Drop any leases that are well beyond max lease time. This # ensures that in the event of a badly behaving actor, we can # drop messages and allow Pub/Sub to resend them. cutoff = time.time() - self._manager.flow_control.max_lease_duration to_drop = [ requests.DropRequest(ack_id, item.size) for ack_id, item in six.iteritems(leased_messages) if item.added_time < cutoff ] if to_drop: _LOGGER.warning( "Dropping %s items because they were leased too long.", len(to_drop) ) self._manager.dispatcher.drop(to_drop) # Remove dropped items from our copy of the leased messages (they # have already been removed from the real one by # self._manager.drop(), which calls self.remove()). for item in to_drop: leased_messages.pop(item.ack_id) # Create a streaming pull request. # We do not actually call `modify_ack_deadline` over and over # because it is more efficient to make a single request. ack_ids = leased_messages.keys() if ack_ids: _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids)) # NOTE: This may not work as expected if ``consumer.active`` # has changed since we checked it. 
An implementation # without any sort of race condition would require a # way for ``send_request`` to fail when the consumer # is inactive. self._manager.dispatcher.modify_ack_deadline( [requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids] ) # Now wait an appropriate period of time and do this again. # # We determine the appropriate period of time based on a random # period between 0 seconds and 90% of the lease. This use of # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. snooze = random.uniform(0.0, p99 * 0.9) _LOGGER.debug("Snoozing lease management for %f seconds.", snooze) self._stop_event.wait(timeout=snooze) _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
python
def maintain_leases(self): """Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats. """ while self._manager.is_active and not self._stop_event.is_set(): # Determine the appropriate duration for the lease. This is # based off of how long previous messages have taken to ack, with # a sensible default and within the ranges allowed by Pub/Sub. p99 = self._manager.ack_histogram.percentile(99) _LOGGER.debug("The current p99 value is %d seconds.", p99) # Make a copy of the leased messages. This is needed because it's # possible for another thread to modify the dictionary while # we're iterating over it. leased_messages = copy.copy(self._leased_messages) # Drop any leases that are well beyond max lease time. This # ensures that in the event of a badly behaving actor, we can # drop messages and allow Pub/Sub to resend them. cutoff = time.time() - self._manager.flow_control.max_lease_duration to_drop = [ requests.DropRequest(ack_id, item.size) for ack_id, item in six.iteritems(leased_messages) if item.added_time < cutoff ] if to_drop: _LOGGER.warning( "Dropping %s items because they were leased too long.", len(to_drop) ) self._manager.dispatcher.drop(to_drop) # Remove dropped items from our copy of the leased messages (they # have already been removed from the real one by # self._manager.drop(), which calls self.remove()). for item in to_drop: leased_messages.pop(item.ack_id) # Create a streaming pull request. # We do not actually call `modify_ack_deadline` over and over # because it is more efficient to make a single request. ack_ids = leased_messages.keys() if ack_ids: _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids)) # NOTE: This may not work as expected if ``consumer.active`` # has changed since we checked it. 
An implementation # without any sort of race condition would require a # way for ``send_request`` to fail when the consumer # is inactive. self._manager.dispatcher.modify_ack_deadline( [requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids] ) # Now wait an appropriate period of time and do this again. # # We determine the appropriate period of time based on a random # period between 0 seconds and 90% of the lease. This use of # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. snooze = random.uniform(0.0, p99 * 0.9) _LOGGER.debug("Snoozing lease management for %f seconds.", snooze) self._stop_event.wait(timeout=snooze) _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
[ "def", "maintain_leases", "(", "self", ")", ":", "while", "self", ".", "_manager", ".", "is_active", "and", "not", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "# Determine the appropriate duration for the lease. This is", "# based off of how long previous messages have taken to ack, with", "# a sensible default and within the ranges allowed by Pub/Sub.", "p99", "=", "self", ".", "_manager", ".", "ack_histogram", ".", "percentile", "(", "99", ")", "_LOGGER", ".", "debug", "(", "\"The current p99 value is %d seconds.\"", ",", "p99", ")", "# Make a copy of the leased messages. This is needed because it's", "# possible for another thread to modify the dictionary while", "# we're iterating over it.", "leased_messages", "=", "copy", ".", "copy", "(", "self", ".", "_leased_messages", ")", "# Drop any leases that are well beyond max lease time. This", "# ensures that in the event of a badly behaving actor, we can", "# drop messages and allow Pub/Sub to resend them.", "cutoff", "=", "time", ".", "time", "(", ")", "-", "self", ".", "_manager", ".", "flow_control", ".", "max_lease_duration", "to_drop", "=", "[", "requests", ".", "DropRequest", "(", "ack_id", ",", "item", ".", "size", ")", "for", "ack_id", ",", "item", "in", "six", ".", "iteritems", "(", "leased_messages", ")", "if", "item", ".", "added_time", "<", "cutoff", "]", "if", "to_drop", ":", "_LOGGER", ".", "warning", "(", "\"Dropping %s items because they were leased too long.\"", ",", "len", "(", "to_drop", ")", ")", "self", ".", "_manager", ".", "dispatcher", ".", "drop", "(", "to_drop", ")", "# Remove dropped items from our copy of the leased messages (they", "# have already been removed from the real one by", "# self._manager.drop(), which calls self.remove()).", "for", "item", "in", "to_drop", ":", "leased_messages", ".", "pop", "(", "item", ".", "ack_id", ")", "# Create a streaming pull request.", "# We do not actually call `modify_ack_deadline` over and over", "# because it is more efficient 
to make a single request.", "ack_ids", "=", "leased_messages", ".", "keys", "(", ")", "if", "ack_ids", ":", "_LOGGER", ".", "debug", "(", "\"Renewing lease for %d ack IDs.\"", ",", "len", "(", "ack_ids", ")", ")", "# NOTE: This may not work as expected if ``consumer.active``", "# has changed since we checked it. An implementation", "# without any sort of race condition would require a", "# way for ``send_request`` to fail when the consumer", "# is inactive.", "self", ".", "_manager", ".", "dispatcher", ".", "modify_ack_deadline", "(", "[", "requests", ".", "ModAckRequest", "(", "ack_id", ",", "p99", ")", "for", "ack_id", "in", "ack_ids", "]", ")", "# Now wait an appropriate period of time and do this again.", "#", "# We determine the appropriate period of time based on a random", "# period between 0 seconds and 90% of the lease. This use of", "# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases", "# where there are many clients.", "snooze", "=", "random", ".", "uniform", "(", "0.0", ",", "p99", "*", "0.9", ")", "_LOGGER", ".", "debug", "(", "\"Snoozing lease management for %f seconds.\"", ",", "snooze", ")", "self", ".", "_stop_event", ".", "wait", "(", "timeout", "=", "snooze", ")", "_LOGGER", ".", "info", "(", "\"%s exiting.\"", ",", "_LEASE_WORKER_NAME", ")" ]
Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats.
[ "Maintain", "all", "of", "the", "leases", "being", "managed", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py#L92-L159
train
googleapis/google-cloud-python
error_reporting/google/cloud/error_reporting/_gapic.py
make_report_error_api
def make_report_error_api(client): """Create an instance of the gapic Logging API. :type client::class:`google.cloud.error_reporting.Client` :param client: Error Reporting client. :rtype: :class:_ErrorReportingGapicApi :returns: An Error Reporting API instance. """ gax_client = report_errors_service_client.ReportErrorsServiceClient( credentials=client._credentials, client_info=_CLIENT_INFO ) return _ErrorReportingGapicApi(gax_client, client.project)
python
def make_report_error_api(client): """Create an instance of the gapic Logging API. :type client::class:`google.cloud.error_reporting.Client` :param client: Error Reporting client. :rtype: :class:_ErrorReportingGapicApi :returns: An Error Reporting API instance. """ gax_client = report_errors_service_client.ReportErrorsServiceClient( credentials=client._credentials, client_info=_CLIENT_INFO ) return _ErrorReportingGapicApi(gax_client, client.project)
[ "def", "make_report_error_api", "(", "client", ")", ":", "gax_client", "=", "report_errors_service_client", ".", "ReportErrorsServiceClient", "(", "credentials", "=", "client", ".", "_credentials", ",", "client_info", "=", "_CLIENT_INFO", ")", "return", "_ErrorReportingGapicApi", "(", "gax_client", ",", "client", ".", "project", ")" ]
Create an instance of the gapic Logging API. :type client::class:`google.cloud.error_reporting.Client` :param client: Error Reporting client. :rtype: :class:_ErrorReportingGapicApi :returns: An Error Reporting API instance.
[ "Create", "an", "instance", "of", "the", "gapic", "Logging", "API", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/error_reporting/google/cloud/error_reporting/_gapic.py#L27-L39
train
googleapis/google-cloud-python
error_reporting/google/cloud/error_reporting/_gapic.py
_ErrorReportingGapicApi.report_error_event
def report_error_event(self, error_report): """Uses the gapic client to report the error. :type error_report: dict :param error_report: payload of the error report formatted according to https://cloud.google.com/error-reporting/docs/formatting-error-messages This object should be built using Use :meth:~`google.cloud.error_reporting.client._build_error_report` """ project_name = self._gapic_api.project_path(self._project) error_report_payload = report_errors_service_pb2.ReportedErrorEvent() ParseDict(error_report, error_report_payload) self._gapic_api.report_error_event(project_name, error_report_payload)
python
def report_error_event(self, error_report): """Uses the gapic client to report the error. :type error_report: dict :param error_report: payload of the error report formatted according to https://cloud.google.com/error-reporting/docs/formatting-error-messages This object should be built using Use :meth:~`google.cloud.error_reporting.client._build_error_report` """ project_name = self._gapic_api.project_path(self._project) error_report_payload = report_errors_service_pb2.ReportedErrorEvent() ParseDict(error_report, error_report_payload) self._gapic_api.report_error_event(project_name, error_report_payload)
[ "def", "report_error_event", "(", "self", ",", "error_report", ")", ":", "project_name", "=", "self", ".", "_gapic_api", ".", "project_path", "(", "self", ".", "_project", ")", "error_report_payload", "=", "report_errors_service_pb2", ".", "ReportedErrorEvent", "(", ")", "ParseDict", "(", "error_report", ",", "error_report_payload", ")", "self", ".", "_gapic_api", ".", "report_error_event", "(", "project_name", ",", "error_report_payload", ")" ]
Uses the gapic client to report the error. :type error_report: dict :param error_report: payload of the error report formatted according to https://cloud.google.com/error-reporting/docs/formatting-error-messages This object should be built using Use :meth:~`google.cloud.error_reporting.client._build_error_report`
[ "Uses", "the", "gapic", "client", "to", "report", "the", "error", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/error_reporting/google/cloud/error_reporting/_gapic.py#L57-L71
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py
BigtableClient.table_path
def table_path(cls, project, instance, table): """Return a fully-qualified table string.""" return google.api_core.path_template.expand( "projects/{project}/instances/{instance}/tables/{table}", project=project, instance=instance, table=table, )
python
def table_path(cls, project, instance, table): """Return a fully-qualified table string.""" return google.api_core.path_template.expand( "projects/{project}/instances/{instance}/tables/{table}", project=project, instance=instance, table=table, )
[ "def", "table_path", "(", "cls", ",", "project", ",", "instance", ",", "table", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/instances/{instance}/tables/{table}\"", ",", "project", "=", "project", ",", "instance", "=", "instance", ",", "table", "=", "table", ",", ")" ]
Return a fully-qualified table string.
[ "Return", "a", "fully", "-", "qualified", "table", "string", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L70-L77
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py
BigtableClient.read_rows
def read_rows( self, table_name, app_profile_id=None, rows=None, filter_=None, rows_limit=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, rows and cells may be broken up across multiple responses, but atomicity of each row will still be preserved. See the ReadRowsResponse documentation for details. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> for element in client.read_rows(table_name): ... # process element ... pass Args: table_name (str): The unique name of the table from which to read. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowSet` filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, reads the entirety of each row. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` rows_limit (long): The read will terminate after committing to N rows' worth of results. The default (zero) is to return all results. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. 
Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "read_rows" not in self._inner_api_calls: self._inner_api_calls[ "read_rows" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.read_rows, default_retry=self._method_configs["ReadRows"].retry, default_timeout=self._method_configs["ReadRows"].timeout, client_info=self._client_info, ) request = bigtable_pb2.ReadRowsRequest( table_name=table_name, app_profile_id=app_profile_id, rows=rows, filter=filter_, rows_limit=rows_limit, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["read_rows"]( request, retry=retry, timeout=timeout, metadata=metadata )
python
def read_rows( self, table_name, app_profile_id=None, rows=None, filter_=None, rows_limit=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, rows and cells may be broken up across multiple responses, but atomicity of each row will still be preserved. See the ReadRowsResponse documentation for details. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> for element in client.read_rows(table_name): ... # process element ... pass Args: table_name (str): The unique name of the table from which to read. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowSet` filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, reads the entirety of each row. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` rows_limit (long): The read will terminate after committing to N rows' worth of results. The default (zero) is to return all results. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. 
Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "read_rows" not in self._inner_api_calls: self._inner_api_calls[ "read_rows" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.read_rows, default_retry=self._method_configs["ReadRows"].retry, default_timeout=self._method_configs["ReadRows"].timeout, client_info=self._client_info, ) request = bigtable_pb2.ReadRowsRequest( table_name=table_name, app_profile_id=app_profile_id, rows=rows, filter=filter_, rows_limit=rows_limit, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["read_rows"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "read_rows", "(", "self", ",", "table_name", ",", "app_profile_id", "=", "None", ",", "rows", "=", "None", ",", "filter_", "=", "None", ",", "rows_limit", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"read_rows\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"read_rows\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "read_rows", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"ReadRows\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"ReadRows\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "bigtable_pb2", ".", "ReadRowsRequest", "(", "table_name", "=", "table_name", ",", "app_profile_id", "=", "app_profile_id", ",", "rows", "=", "rows", ",", "filter", "=", "filter_", ",", "rows_limit", "=", "rows_limit", ",", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"table_name\"", ",", "table_name", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"read_rows\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, rows and cells may be broken up across multiple responses, but atomicity of each row will still be preserved. See the ReadRowsResponse documentation for details. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> for element in client.read_rows(table_name): ... # process element ... pass Args: table_name (str): The unique name of the table from which to read. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowSet` filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, reads the entirety of each row. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` rows_limit (long): The read will terminate after committing to N rows' worth of results. The default (zero) is to return all results. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. 
Returns: Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Streams", "back", "the", "contents", "of", "all", "requested", "rows", "in", "key", "order", "optionally", "applying", "the", "same", "Reader", "filter", "to", "each", ".", "Depending", "on", "their", "size", "rows", "and", "cells", "may", "be", "broken", "up", "across", "multiple", "responses", "but", "atomicity", "of", "each", "row", "will", "still", "be", "preserved", ".", "See", "the", "ReadRowsResponse", "documentation", "for", "details", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L178-L275
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py
BigtableClient.mutate_rows
def mutate_rows( self, table_name, entries, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): ... # process element ... pass Args: table_name (str): The unique name of the table to which the mutations should be applied. entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "mutate_rows" not in self._inner_api_calls: self._inner_api_calls[ "mutate_rows" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.mutate_rows, default_retry=self._method_configs["MutateRows"].retry, default_timeout=self._method_configs["MutateRows"].timeout, client_info=self._client_info, ) request = bigtable_pb2.MutateRowsRequest( table_name=table_name, entries=entries, app_profile_id=app_profile_id ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["mutate_rows"]( request, retry=retry, timeout=timeout, metadata=metadata )
python
def mutate_rows( self, table_name, entries, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): ... # process element ... pass Args: table_name (str): The unique name of the table to which the mutations should be applied. entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "mutate_rows" not in self._inner_api_calls: self._inner_api_calls[ "mutate_rows" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.mutate_rows, default_retry=self._method_configs["MutateRows"].retry, default_timeout=self._method_configs["MutateRows"].timeout, client_info=self._client_info, ) request = bigtable_pb2.MutateRowsRequest( table_name=table_name, entries=entries, app_profile_id=app_profile_id ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["mutate_rows"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "mutate_rows", "(", "self", ",", "table_name", ",", "entries", ",", "app_profile_id", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"mutate_rows\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"mutate_rows\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "mutate_rows", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"MutateRows\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"MutateRows\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "bigtable_pb2", ".", "MutateRowsRequest", "(", "table_name", "=", "table_name", ",", "entries", "=", "entries", ",", "app_profile_id", "=", "app_profile_id", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"table_name\"", ",", "table_name", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"mutate_rows\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): ... # process element ... pass Args: table_name (str): The unique name of the table to which the mutations should be applied. entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Mutates", "multiple", "rows", "in", "a", "batch", ".", "Each", "individual", "row", "is", "mutated", "atomically", "as", "in", "MutateRow", "but", "the", "entire", "batch", "is", "not", "executed", "atomically", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L452-L540
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py
BigtableClient.check_and_mutate_row
def check_and_mutate_row( self, table_name, row_key, app_profile_id=None, predicate_filter=None, true_mutations=None, false_mutations=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates a row atomically based on the output of a predicate Reader filter. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``true_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "check_and_mutate_row" not in self._inner_api_calls: self._inner_api_calls[ "check_and_mutate_row" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.check_and_mutate_row, default_retry=self._method_configs["CheckAndMutateRow"].retry, default_timeout=self._method_configs["CheckAndMutateRow"].timeout, client_info=self._client_info, ) request = bigtable_pb2.CheckAndMutateRowRequest( table_name=table_name, row_key=row_key, app_profile_id=app_profile_id, predicate_filter=predicate_filter, true_mutations=true_mutations, false_mutations=false_mutations, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["check_and_mutate_row"]( request, retry=retry, timeout=timeout, metadata=metadata )
python
def check_and_mutate_row( self, table_name, row_key, app_profile_id=None, predicate_filter=None, true_mutations=None, false_mutations=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Mutates a row atomically based on the output of a predicate Reader filter. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``true_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "check_and_mutate_row" not in self._inner_api_calls: self._inner_api_calls[ "check_and_mutate_row" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.check_and_mutate_row, default_retry=self._method_configs["CheckAndMutateRow"].retry, default_timeout=self._method_configs["CheckAndMutateRow"].timeout, client_info=self._client_info, ) request = bigtable_pb2.CheckAndMutateRowRequest( table_name=table_name, row_key=row_key, app_profile_id=app_profile_id, predicate_filter=predicate_filter, true_mutations=true_mutations, false_mutations=false_mutations, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["check_and_mutate_row"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "check_and_mutate_row", "(", "self", ",", "table_name", ",", "row_key", ",", "app_profile_id", "=", "None", ",", "predicate_filter", "=", "None", ",", "true_mutations", "=", "None", ",", "false_mutations", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"check_and_mutate_row\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"check_and_mutate_row\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "check_and_mutate_row", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"CheckAndMutateRow\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"CheckAndMutateRow\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "bigtable_pb2", ".", "CheckAndMutateRowRequest", "(", "table_name", "=", "table_name", ",", "row_key", "=", "row_key", ",", "app_profile_id", "=", "app_profile_id", ",", "predicate_filter", "=", "predicate_filter", ",", "true_mutations", "=", "true_mutations", ",", "false_mutations", "=", "false_mutations", ",", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"table_name\"", ",", "table_name", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"check_and_mutate_row\"", "]", "(", 
"request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Mutates a row atomically based on the output of a predicate Reader filter. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. 
Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``true_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Mutates", "a", "row", "atomically", "based", "on", "the", "output", "of", "a", "predicate", "Reader", "filter", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L542-L652
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance._update_from_pb
def _update_from_pb(self, instance_pb): """Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`. """ if not instance_pb.display_name: # Simple field (string) raise ValueError("Instance protobuf does not contain display_name") self.display_name = instance_pb.display_name self.type_ = instance_pb.type self.labels = dict(instance_pb.labels) self._state = instance_pb.state
python
def _update_from_pb(self, instance_pb): """Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`. """ if not instance_pb.display_name: # Simple field (string) raise ValueError("Instance protobuf does not contain display_name") self.display_name = instance_pb.display_name self.type_ = instance_pb.type self.labels = dict(instance_pb.labels) self._state = instance_pb.state
[ "def", "_update_from_pb", "(", "self", ",", "instance_pb", ")", ":", "if", "not", "instance_pb", ".", "display_name", ":", "# Simple field (string)", "raise", "ValueError", "(", "\"Instance protobuf does not contain display_name\"", ")", "self", ".", "display_name", "=", "instance_pb", ".", "display_name", "self", ".", "type_", "=", "instance_pb", ".", "type", "self", ".", "labels", "=", "dict", "(", "instance_pb", ".", "labels", ")", "self", ".", "_state", "=", "instance_pb", ".", "state" ]
Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`.
[ "Refresh", "self", "from", "the", "server", "-", "provided", "protobuf", ".", "Helper", "for", ":", "meth", ":", "from_pb", "and", ":", "meth", ":", "reload", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L118-L127
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.name
def name(self): """Instance name used in requests. .. note:: This property will not change if ``instance_id`` does not, but the return value is not cached. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_instance_name] :end-before: [END bigtable_instance_name] The instance name is of the form ``"projects/{project}/instances/{instance_id}"`` :rtype: str :returns: Return a fully-qualified instance string. """ return self._client.instance_admin_client.instance_path( project=self._client.project, instance=self.instance_id )
python
def name(self): """Instance name used in requests. .. note:: This property will not change if ``instance_id`` does not, but the return value is not cached. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_instance_name] :end-before: [END bigtable_instance_name] The instance name is of the form ``"projects/{project}/instances/{instance_id}"`` :rtype: str :returns: Return a fully-qualified instance string. """ return self._client.instance_admin_client.instance_path( project=self._client.project, instance=self.instance_id )
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "_client", ".", "instance_admin_client", ".", "instance_path", "(", "project", "=", "self", ".", "_client", ".", "project", ",", "instance", "=", "self", ".", "instance_id", ")" ]
Instance name used in requests. .. note:: This property will not change if ``instance_id`` does not, but the return value is not cached. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_instance_name] :end-before: [END bigtable_instance_name] The instance name is of the form ``"projects/{project}/instances/{instance_id}"`` :rtype: str :returns: Return a fully-qualified instance string.
[ "Instance", "name", "used", "in", "requests", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L170-L192
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.create
def create( self, location_id=None, serve_nodes=None, default_storage_type=None, clusters=None, ): """Create this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_prod_instance] :end-before: [END bigtable_create_prod_instance] .. note:: Uses the ``project`` and ``instance_id`` on the current :class:`Instance` in addition to the ``display_name``. To change them before creating, reset the values via .. code:: python instance.display_name = 'New display name' instance.instance_id = 'i-changed-my-mind' before calling :meth:`create`. :type location_id: str :param location_id: (Creation Only) The location where nodes and storage of the cluster owned by this instance reside. For best performance, clients should be located as close as possible to cluster's location. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the instance's cluster; used to set up the instance's cluster. :type default_storage_type: int :param default_storage_type: (Optional) The storage media type for persisting Bigtable data. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]` :param clusters: List of clusters to be created. :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. :raises: :class:`ValueError <exceptions.ValueError>` if both ``clusters`` and one of ``location_id``, ``serve_nodes`` and ``default_storage_type`` are set. 
""" if clusters is None: warnings.warn( _INSTANCE_CREATE_WARNING.format( "location_id", "serve_nodes", "default_storage_type" ), DeprecationWarning, stacklevel=2, ) cluster_id = "{}-cluster".format(self.instance_id) clusters = [ self.cluster( cluster_id, location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, ) ] elif ( location_id is not None or serve_nodes is not None or default_storage_type is not None ): raise ValueError( "clusters and one of location_id, serve_nodes, \ default_storage_type can not be set \ simultaneously." ) instance_pb = instance_pb2.Instance( display_name=self.display_name, type=self.type_, labels=self.labels ) parent = self._client.project_path return self._client.instance_admin_client.create_instance( parent=parent, instance_id=self.instance_id, instance=instance_pb, clusters={c.cluster_id: c._to_pb() for c in clusters}, )
python
def create( self, location_id=None, serve_nodes=None, default_storage_type=None, clusters=None, ): """Create this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_prod_instance] :end-before: [END bigtable_create_prod_instance] .. note:: Uses the ``project`` and ``instance_id`` on the current :class:`Instance` in addition to the ``display_name``. To change them before creating, reset the values via .. code:: python instance.display_name = 'New display name' instance.instance_id = 'i-changed-my-mind' before calling :meth:`create`. :type location_id: str :param location_id: (Creation Only) The location where nodes and storage of the cluster owned by this instance reside. For best performance, clients should be located as close as possible to cluster's location. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the instance's cluster; used to set up the instance's cluster. :type default_storage_type: int :param default_storage_type: (Optional) The storage media type for persisting Bigtable data. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]` :param clusters: List of clusters to be created. :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. :raises: :class:`ValueError <exceptions.ValueError>` if both ``clusters`` and one of ``location_id``, ``serve_nodes`` and ``default_storage_type`` are set. 
""" if clusters is None: warnings.warn( _INSTANCE_CREATE_WARNING.format( "location_id", "serve_nodes", "default_storage_type" ), DeprecationWarning, stacklevel=2, ) cluster_id = "{}-cluster".format(self.instance_id) clusters = [ self.cluster( cluster_id, location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, ) ] elif ( location_id is not None or serve_nodes is not None or default_storage_type is not None ): raise ValueError( "clusters and one of location_id, serve_nodes, \ default_storage_type can not be set \ simultaneously." ) instance_pb = instance_pb2.Instance( display_name=self.display_name, type=self.type_, labels=self.labels ) parent = self._client.project_path return self._client.instance_admin_client.create_instance( parent=parent, instance_id=self.instance_id, instance=instance_pb, clusters={c.cluster_id: c._to_pb() for c in clusters}, )
[ "def", "create", "(", "self", ",", "location_id", "=", "None", ",", "serve_nodes", "=", "None", ",", "default_storage_type", "=", "None", ",", "clusters", "=", "None", ",", ")", ":", "if", "clusters", "is", "None", ":", "warnings", ".", "warn", "(", "_INSTANCE_CREATE_WARNING", ".", "format", "(", "\"location_id\"", ",", "\"serve_nodes\"", ",", "\"default_storage_type\"", ")", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ",", ")", "cluster_id", "=", "\"{}-cluster\"", ".", "format", "(", "self", ".", "instance_id", ")", "clusters", "=", "[", "self", ".", "cluster", "(", "cluster_id", ",", "location_id", "=", "location_id", ",", "serve_nodes", "=", "serve_nodes", ",", "default_storage_type", "=", "default_storage_type", ",", ")", "]", "elif", "(", "location_id", "is", "not", "None", "or", "serve_nodes", "is", "not", "None", "or", "default_storage_type", "is", "not", "None", ")", ":", "raise", "ValueError", "(", "\"clusters and one of location_id, serve_nodes, \\\n default_storage_type can not be set \\\n simultaneously.\"", ")", "instance_pb", "=", "instance_pb2", ".", "Instance", "(", "display_name", "=", "self", ".", "display_name", ",", "type", "=", "self", ".", "type_", ",", "labels", "=", "self", ".", "labels", ")", "parent", "=", "self", ".", "_client", ".", "project_path", "return", "self", ".", "_client", ".", "instance_admin_client", ".", "create_instance", "(", "parent", "=", "parent", ",", "instance_id", "=", "self", ".", "instance_id", ",", "instance", "=", "instance_pb", ",", "clusters", "=", "{", "c", ".", "cluster_id", ":", "c", ".", "_to_pb", "(", ")", "for", "c", "in", "clusters", "}", ",", ")" ]
Create this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_prod_instance] :end-before: [END bigtable_create_prod_instance] .. note:: Uses the ``project`` and ``instance_id`` on the current :class:`Instance` in addition to the ``display_name``. To change them before creating, reset the values via .. code:: python instance.display_name = 'New display name' instance.instance_id = 'i-changed-my-mind' before calling :meth:`create`. :type location_id: str :param location_id: (Creation Only) The location where nodes and storage of the cluster owned by this instance reside. For best performance, clients should be located as close as possible to cluster's location. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the instance's cluster; used to set up the instance's cluster. :type default_storage_type: int :param default_storage_type: (Optional) The storage media type for persisting Bigtable data. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]` :param clusters: List of clusters to be created. :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. :raises: :class:`ValueError <exceptions.ValueError>` if both ``clusters`` and one of ``location_id``, ``serve_nodes`` and ``default_storage_type`` are set.
[ "Create", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L221-L325
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.exists
def exists(self): """Check whether the instance already exists. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_check_instance_exists] :end-before: [END bigtable_check_instance_exists] :rtype: bool :returns: True if the table exists, else False. """ try: self._client.instance_admin_client.get_instance(name=self.name) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: return False
python
def exists(self): """Check whether the instance already exists. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_check_instance_exists] :end-before: [END bigtable_check_instance_exists] :rtype: bool :returns: True if the table exists, else False. """ try: self._client.instance_admin_client.get_instance(name=self.name) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: return False
[ "def", "exists", "(", "self", ")", ":", "try", ":", "self", ".", "_client", ".", "instance_admin_client", ".", "get_instance", "(", "name", "=", "self", ".", "name", ")", "return", "True", "# NOTE: There could be other exceptions that are returned to the user.", "except", "NotFound", ":", "return", "False" ]
Check whether the instance already exists. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_check_instance_exists] :end-before: [END bigtable_check_instance_exists] :rtype: bool :returns: True if the table exists, else False.
[ "Check", "whether", "the", "instance", "already", "exists", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L327-L344
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.reload
def reload(self): """Reload the metadata for this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_reload_instance] :end-before: [END bigtable_reload_instance] """ instance_pb = self._client.instance_admin_client.get_instance(self.name) # NOTE: _update_from_pb does not check that the project and # instance ID on the response match the request. self._update_from_pb(instance_pb)
python
def reload(self): """Reload the metadata for this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_reload_instance] :end-before: [END bigtable_reload_instance] """ instance_pb = self._client.instance_admin_client.get_instance(self.name) # NOTE: _update_from_pb does not check that the project and # instance ID on the response match the request. self._update_from_pb(instance_pb)
[ "def", "reload", "(", "self", ")", ":", "instance_pb", "=", "self", ".", "_client", ".", "instance_admin_client", ".", "get_instance", "(", "self", ".", "name", ")", "# NOTE: _update_from_pb does not check that the project and", "# instance ID on the response match the request.", "self", ".", "_update_from_pb", "(", "instance_pb", ")" ]
Reload the metadata for this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_reload_instance] :end-before: [END bigtable_reload_instance]
[ "Reload", "the", "metadata", "for", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L346-L359
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.update
def update(self): """Updates an instance within a project. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_update_instance] :end-before: [END bigtable_update_instance] .. note:: Updates any or all of the following values: ``display_name`` ``type`` ``labels`` To change a value before updating, assign that values via .. code:: python instance.display_name = 'New display name' before calling :meth:`update`. :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the update operation. """ update_mask_pb = field_mask_pb2.FieldMask() if self.display_name is not None: update_mask_pb.paths.append("display_name") if self.type_ is not None: update_mask_pb.paths.append("type") if self.labels is not None: update_mask_pb.paths.append("labels") instance_pb = instance_pb2.Instance( name=self.name, display_name=self.display_name, type=self.type_, labels=self.labels, ) return self._client.instance_admin_client.partial_update_instance( instance=instance_pb, update_mask=update_mask_pb )
python
def update(self): """Updates an instance within a project. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_update_instance] :end-before: [END bigtable_update_instance] .. note:: Updates any or all of the following values: ``display_name`` ``type`` ``labels`` To change a value before updating, assign that values via .. code:: python instance.display_name = 'New display name' before calling :meth:`update`. :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the update operation. """ update_mask_pb = field_mask_pb2.FieldMask() if self.display_name is not None: update_mask_pb.paths.append("display_name") if self.type_ is not None: update_mask_pb.paths.append("type") if self.labels is not None: update_mask_pb.paths.append("labels") instance_pb = instance_pb2.Instance( name=self.name, display_name=self.display_name, type=self.type_, labels=self.labels, ) return self._client.instance_admin_client.partial_update_instance( instance=instance_pb, update_mask=update_mask_pb )
[ "def", "update", "(", "self", ")", ":", "update_mask_pb", "=", "field_mask_pb2", ".", "FieldMask", "(", ")", "if", "self", ".", "display_name", "is", "not", "None", ":", "update_mask_pb", ".", "paths", ".", "append", "(", "\"display_name\"", ")", "if", "self", ".", "type_", "is", "not", "None", ":", "update_mask_pb", ".", "paths", ".", "append", "(", "\"type\"", ")", "if", "self", ".", "labels", "is", "not", "None", ":", "update_mask_pb", ".", "paths", ".", "append", "(", "\"labels\"", ")", "instance_pb", "=", "instance_pb2", ".", "Instance", "(", "name", "=", "self", ".", "name", ",", "display_name", "=", "self", ".", "display_name", ",", "type", "=", "self", ".", "type_", ",", "labels", "=", "self", ".", "labels", ",", ")", "return", "self", ".", "_client", ".", "instance_admin_client", ".", "partial_update_instance", "(", "instance", "=", "instance_pb", ",", "update_mask", "=", "update_mask_pb", ")" ]
Updates an instance within a project. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_update_instance] :end-before: [END bigtable_update_instance] .. note:: Updates any or all of the following values: ``display_name`` ``type`` ``labels`` To change a value before updating, assign that values via .. code:: python instance.display_name = 'New display name' before calling :meth:`update`. :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the update operation.
[ "Updates", "an", "instance", "within", "a", "project", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L361-L405
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.get_iam_policy
def get_iam_policy(self): """Gets the access control policy for an instance resource. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_get_iam_policy] :end-before: [END bigtable_get_iam_policy] :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.get_iam_policy(resource=self.name) return Policy.from_pb(resp)
python
def get_iam_policy(self): """Gets the access control policy for an instance resource. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_get_iam_policy] :end-before: [END bigtable_get_iam_policy] :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.get_iam_policy(resource=self.name) return Policy.from_pb(resp)
[ "def", "get_iam_policy", "(", "self", ")", ":", "instance_admin_client", "=", "self", ".", "_client", ".", "instance_admin_client", "resp", "=", "instance_admin_client", ".", "get_iam_policy", "(", "resource", "=", "self", ".", "name", ")", "return", "Policy", ".", "from_pb", "(", "resp", ")" ]
Gets the access control policy for an instance resource. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_get_iam_policy] :end-before: [END bigtable_get_iam_policy] :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance
[ "Gets", "the", "access", "control", "policy", "for", "an", "instance", "resource", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L437-L451
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.set_iam_policy
def set_iam_policy(self, policy): """Sets the access control policy on an instance resource. Replaces any existing policy. For more information about policy, please see documentation of class `google.cloud.bigtable.policy.Policy` For example: .. literalinclude:: snippets.py :start-after: [START bigtable_set_iam_policy] :end-before: [END bigtable_set_iam_policy] :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy of this instance :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance. """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.set_iam_policy( resource=self.name, policy=policy.to_pb() ) return Policy.from_pb(resp)
python
def set_iam_policy(self, policy): """Sets the access control policy on an instance resource. Replaces any existing policy. For more information about policy, please see documentation of class `google.cloud.bigtable.policy.Policy` For example: .. literalinclude:: snippets.py :start-after: [START bigtable_set_iam_policy] :end-before: [END bigtable_set_iam_policy] :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy of this instance :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance. """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.set_iam_policy( resource=self.name, policy=policy.to_pb() ) return Policy.from_pb(resp)
[ "def", "set_iam_policy", "(", "self", ",", "policy", ")", ":", "instance_admin_client", "=", "self", ".", "_client", ".", "instance_admin_client", "resp", "=", "instance_admin_client", ".", "set_iam_policy", "(", "resource", "=", "self", ".", "name", ",", "policy", "=", "policy", ".", "to_pb", "(", ")", ")", "return", "Policy", ".", "from_pb", "(", "resp", ")" ]
Sets the access control policy on an instance resource. Replaces any existing policy. For more information about policy, please see documentation of class `google.cloud.bigtable.policy.Policy` For example: .. literalinclude:: snippets.py :start-after: [START bigtable_set_iam_policy] :end-before: [END bigtable_set_iam_policy] :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy of this instance :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance.
[ "Sets", "the", "access", "control", "policy", "on", "an", "instance", "resource", ".", "Replaces", "any", "existing", "policy", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L453-L477
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.cluster
def cluster( self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None ): """Factory to create a cluster associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_cluster] :end-before: [END bigtable_create_cluster] :type cluster_id: str :param cluster_id: The ID of the cluster. :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. :type location_id: str :param location_id: (Creation Only) The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. :type default_storage_type: int :param default_storage_type: (Optional) The type of storage Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance. """ return Cluster( cluster_id, self, location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, )
python
def cluster( self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None ): """Factory to create a cluster associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_cluster] :end-before: [END bigtable_create_cluster] :type cluster_id: str :param cluster_id: The ID of the cluster. :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. :type location_id: str :param location_id: (Creation Only) The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. :type default_storage_type: int :param default_storage_type: (Optional) The type of storage Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance. """ return Cluster( cluster_id, self, location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, )
[ "def", "cluster", "(", "self", ",", "cluster_id", ",", "location_id", "=", "None", ",", "serve_nodes", "=", "None", ",", "default_storage_type", "=", "None", ")", ":", "return", "Cluster", "(", "cluster_id", ",", "self", ",", "location_id", "=", "location_id", ",", "serve_nodes", "=", "serve_nodes", ",", "default_storage_type", "=", "default_storage_type", ",", ")" ]
Factory to create a cluster associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_cluster] :end-before: [END bigtable_create_cluster] :type cluster_id: str :param cluster_id: The ID of the cluster. :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. :type location_id: str :param location_id: (Creation Only) The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. :type default_storage_type: int :param default_storage_type: (Optional) The type of storage Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance.
[ "Factory", "to", "create", "a", "cluster", "associated", "with", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L507-L553
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.list_clusters
def list_clusters(self): """List the clusters in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_clusters_on_instance] :end-before: [END bigtable_list_clusters_on_instance] :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of :class:`google.cloud.bigtable.instance.Cluster`, and 'failed_locations' is a list of locations which could not be resolved. """ resp = self._client.instance_admin_client.list_clusters(self.name) clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] return clusters, resp.failed_locations
python
def list_clusters(self): """List the clusters in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_clusters_on_instance] :end-before: [END bigtable_list_clusters_on_instance] :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of :class:`google.cloud.bigtable.instance.Cluster`, and 'failed_locations' is a list of locations which could not be resolved. """ resp = self._client.instance_admin_client.list_clusters(self.name) clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] return clusters, resp.failed_locations
[ "def", "list_clusters", "(", "self", ")", ":", "resp", "=", "self", ".", "_client", ".", "instance_admin_client", ".", "list_clusters", "(", "self", ".", "name", ")", "clusters", "=", "[", "Cluster", ".", "from_pb", "(", "cluster", ",", "self", ")", "for", "cluster", "in", "resp", ".", "clusters", "]", "return", "clusters", ",", "resp", ".", "failed_locations" ]
List the clusters in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_clusters_on_instance] :end-before: [END bigtable_list_clusters_on_instance] :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of :class:`google.cloud.bigtable.instance.Cluster`, and 'failed_locations' is a list of locations which could not be resolved.
[ "List", "the", "clusters", "in", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L555-L573
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.table
def table(self, table_id, mutation_timeout=None, app_profile_id=None): """Factory to create a table associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_table] :end-before: [END bigtable_create_table] :type table_id: str :param table_id: The ID of the table. :type app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. :rtype: :class:`Table <google.cloud.bigtable.table.Table>` :returns: The table owned by this instance. """ return Table( table_id, self, app_profile_id=app_profile_id, mutation_timeout=mutation_timeout, )
python
def table(self, table_id, mutation_timeout=None, app_profile_id=None): """Factory to create a table associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_table] :end-before: [END bigtable_create_table] :type table_id: str :param table_id: The ID of the table. :type app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. :rtype: :class:`Table <google.cloud.bigtable.table.Table>` :returns: The table owned by this instance. """ return Table( table_id, self, app_profile_id=app_profile_id, mutation_timeout=mutation_timeout, )
[ "def", "table", "(", "self", ",", "table_id", ",", "mutation_timeout", "=", "None", ",", "app_profile_id", "=", "None", ")", ":", "return", "Table", "(", "table_id", ",", "self", ",", "app_profile_id", "=", "app_profile_id", ",", "mutation_timeout", "=", "mutation_timeout", ",", ")" ]
Factory to create a table associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_table] :end-before: [END bigtable_create_table] :type table_id: str :param table_id: The ID of the table. :type app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. :rtype: :class:`Table <google.cloud.bigtable.table.Table>` :returns: The table owned by this instance.
[ "Factory", "to", "create", "a", "table", "associated", "with", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L575-L598
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.list_tables
def list_tables(self): """List the tables in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_tables] :end-before: [END bigtable_list_tables] :rtype: list of :class:`Table <google.cloud.bigtable.table.Table>` :returns: The list of tables owned by the instance. :raises: :class:`ValueError <exceptions.ValueError>` if one of the returned tables has a name that is not of the expected format. """ table_list_pb = self._client.table_admin_client.list_tables(self.name) result = [] for table_pb in table_list_pb: table_prefix = self.name + "/tables/" if not table_pb.name.startswith(table_prefix): raise ValueError( "Table name {} not of expected format".format(table_pb.name) ) table_id = table_pb.name[len(table_prefix) :] result.append(self.table(table_id)) return result
python
def list_tables(self): """List the tables in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_tables] :end-before: [END bigtable_list_tables] :rtype: list of :class:`Table <google.cloud.bigtable.table.Table>` :returns: The list of tables owned by the instance. :raises: :class:`ValueError <exceptions.ValueError>` if one of the returned tables has a name that is not of the expected format. """ table_list_pb = self._client.table_admin_client.list_tables(self.name) result = [] for table_pb in table_list_pb: table_prefix = self.name + "/tables/" if not table_pb.name.startswith(table_prefix): raise ValueError( "Table name {} not of expected format".format(table_pb.name) ) table_id = table_pb.name[len(table_prefix) :] result.append(self.table(table_id)) return result
[ "def", "list_tables", "(", "self", ")", ":", "table_list_pb", "=", "self", ".", "_client", ".", "table_admin_client", ".", "list_tables", "(", "self", ".", "name", ")", "result", "=", "[", "]", "for", "table_pb", "in", "table_list_pb", ":", "table_prefix", "=", "self", ".", "name", "+", "\"/tables/\"", "if", "not", "table_pb", ".", "name", ".", "startswith", "(", "table_prefix", ")", ":", "raise", "ValueError", "(", "\"Table name {} not of expected format\"", ".", "format", "(", "table_pb", ".", "name", ")", ")", "table_id", "=", "table_pb", ".", "name", "[", "len", "(", "table_prefix", ")", ":", "]", "result", ".", "append", "(", "self", ".", "table", "(", "table_id", ")", ")", "return", "result" ]
List the tables in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_tables] :end-before: [END bigtable_list_tables] :rtype: list of :class:`Table <google.cloud.bigtable.table.Table>` :returns: The list of tables owned by the instance. :raises: :class:`ValueError <exceptions.ValueError>` if one of the returned tables has a name that is not of the expected format.
[ "List", "the", "tables", "in", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L600-L626
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.app_profile
def app_profile( self, app_profile_id, routing_policy_type=None, description=None, cluster_id=None, allow_transactional_writes=None, ): """Factory to create AppProfile associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_app_profile] :end-before: [END bigtable_create_app_profile] :type app_profile_id: str :param app_profile_id: The ID of the AppProfile. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type: routing_policy_type: int :param: routing_policy_type: The type of the routing policy. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use case for this AppProfile. :type: cluster_id: str :param: cluster_id: (Optional) Unique cluster_id which is only required when routing_policy_type is ROUTING_POLICY_TYPE_SINGLE. :type: allow_transactional_writes: bool :param: allow_transactional_writes: (Optional) If true, allow transactional writes for ROUTING_POLICY_TYPE_SINGLE. :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` :returns: AppProfile for this instance. """ return AppProfile( app_profile_id, self, routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, allow_transactional_writes=allow_transactional_writes, )
python
def app_profile( self, app_profile_id, routing_policy_type=None, description=None, cluster_id=None, allow_transactional_writes=None, ): """Factory to create AppProfile associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_app_profile] :end-before: [END bigtable_create_app_profile] :type app_profile_id: str :param app_profile_id: The ID of the AppProfile. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type: routing_policy_type: int :param: routing_policy_type: The type of the routing policy. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use case for this AppProfile. :type: cluster_id: str :param: cluster_id: (Optional) Unique cluster_id which is only required when routing_policy_type is ROUTING_POLICY_TYPE_SINGLE. :type: allow_transactional_writes: bool :param: allow_transactional_writes: (Optional) If true, allow transactional writes for ROUTING_POLICY_TYPE_SINGLE. :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` :returns: AppProfile for this instance. """ return AppProfile( app_profile_id, self, routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, allow_transactional_writes=allow_transactional_writes, )
[ "def", "app_profile", "(", "self", ",", "app_profile_id", ",", "routing_policy_type", "=", "None", ",", "description", "=", "None", ",", "cluster_id", "=", "None", ",", "allow_transactional_writes", "=", "None", ",", ")", ":", "return", "AppProfile", "(", "app_profile_id", ",", "self", ",", "routing_policy_type", "=", "routing_policy_type", ",", "description", "=", "description", ",", "cluster_id", "=", "cluster_id", ",", "allow_transactional_writes", "=", "allow_transactional_writes", ",", ")" ]
Factory to create AppProfile associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_app_profile] :end-before: [END bigtable_create_app_profile] :type app_profile_id: str :param app_profile_id: The ID of the AppProfile. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type: routing_policy_type: int :param: routing_policy_type: The type of the routing policy. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use case for this AppProfile. :type: cluster_id: str :param: cluster_id: (Optional) Unique cluster_id which is only required when routing_policy_type is ROUTING_POLICY_TYPE_SINGLE. :type: allow_transactional_writes: bool :param: allow_transactional_writes: (Optional) If true, allow transactional writes for ROUTING_POLICY_TYPE_SINGLE. :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` :returns: AppProfile for this instance.
[ "Factory", "to", "create", "AppProfile", "associated", "with", "this", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L628-L679
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/instance.py
Instance.list_app_profiles
def list_app_profiles(self): """Lists information about AppProfiles in an instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_app_profiles] :end-before: [END bigtable_list_app_profiles] :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. By default, this is a list of :class:`~google.cloud.bigtable.app_profile.AppProfile` instances. """ resp = self._client.instance_admin_client.list_app_profiles(self.name) return [AppProfile.from_pb(app_profile, self) for app_profile in resp]
python
def list_app_profiles(self): """Lists information about AppProfiles in an instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_app_profiles] :end-before: [END bigtable_list_app_profiles] :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. By default, this is a list of :class:`~google.cloud.bigtable.app_profile.AppProfile` instances. """ resp = self._client.instance_admin_client.list_app_profiles(self.name) return [AppProfile.from_pb(app_profile, self) for app_profile in resp]
[ "def", "list_app_profiles", "(", "self", ")", ":", "resp", "=", "self", ".", "_client", ".", "instance_admin_client", ".", "list_app_profiles", "(", "self", ".", "name", ")", "return", "[", "AppProfile", ".", "from_pb", "(", "app_profile", ",", "self", ")", "for", "app_profile", "in", "resp", "]" ]
Lists information about AppProfiles in an instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_app_profiles] :end-before: [END bigtable_list_app_profiles] :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. By default, this is a list of :class:`~google.cloud.bigtable.app_profile.AppProfile` instances.
[ "Lists", "information", "about", "AppProfiles", "in", "an", "instance", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L681-L697
train
googleapis/google-cloud-python
bigtable/noxfile.py
system
def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) # Sanity check: only run tests if found. if not system_test_exists and not system_test_folder_exists: session.skip("System tests were not found") # Use pre-release gRPC for system tests. session.install("--pre", "grpcio") # Install all test dependencies, then install this package into the # virtualenv's dist-packages. session.install("mock", "pytest") for local_dep in LOCAL_DEPS: session.install("-e", local_dep) session.install("-e", "../test_utils/") session.install("-e", ".") # Run py.test against the system tests. if system_test_exists: session.run("py.test", "--quiet", system_test_path, *session.posargs) if system_test_folder_exists: session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
python
def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) # Sanity check: only run tests if found. if not system_test_exists and not system_test_folder_exists: session.skip("System tests were not found") # Use pre-release gRPC for system tests. session.install("--pre", "grpcio") # Install all test dependencies, then install this package into the # virtualenv's dist-packages. session.install("mock", "pytest") for local_dep in LOCAL_DEPS: session.install("-e", local_dep) session.install("-e", "../test_utils/") session.install("-e", ".") # Run py.test against the system tests. if system_test_exists: session.run("py.test", "--quiet", system_test_path, *session.posargs) if system_test_folder_exists: session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
[ "def", "system", "(", "session", ")", ":", "system_test_path", "=", "os", ".", "path", ".", "join", "(", "\"tests\"", ",", "\"system.py\"", ")", "system_test_folder_path", "=", "os", ".", "path", ".", "join", "(", "\"tests\"", ",", "\"system\"", ")", "# Sanity check: Only run tests if the environment variable is set.", "if", "not", "os", ".", "environ", ".", "get", "(", "\"GOOGLE_APPLICATION_CREDENTIALS\"", ",", "\"\"", ")", ":", "session", ".", "skip", "(", "\"Credentials must be set via environment variable\"", ")", "system_test_exists", "=", "os", ".", "path", ".", "exists", "(", "system_test_path", ")", "system_test_folder_exists", "=", "os", ".", "path", ".", "exists", "(", "system_test_folder_path", ")", "# Sanity check: only run tests if found.", "if", "not", "system_test_exists", "and", "not", "system_test_folder_exists", ":", "session", ".", "skip", "(", "\"System tests were not found\"", ")", "# Use pre-release gRPC for system tests.", "session", ".", "install", "(", "\"--pre\"", ",", "\"grpcio\"", ")", "# Install all test dependencies, then install this package into the", "# virtualenv's dist-packages.", "session", ".", "install", "(", "\"mock\"", ",", "\"pytest\"", ")", "for", "local_dep", "in", "LOCAL_DEPS", ":", "session", ".", "install", "(", "\"-e\"", ",", "local_dep", ")", "session", ".", "install", "(", "\"-e\"", ",", "\"../test_utils/\"", ")", "session", ".", "install", "(", "\"-e\"", ",", "\".\"", ")", "# Run py.test against the system tests.", "if", "system_test_exists", ":", "session", ".", "run", "(", "\"py.test\"", ",", "\"--quiet\"", ",", "system_test_path", ",", "*", "session", ".", "posargs", ")", "if", "system_test_folder_exists", ":", "session", ".", "run", "(", "\"py.test\"", ",", "\"--quiet\"", ",", "system_test_folder_path", ",", "*", "session", ".", "posargs", ")" ]
Run the system test suite.
[ "Run", "the", "system", "test", "suite", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/noxfile.py#L94-L123
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_reference_getter
def _reference_getter(table): """A :class:`~google.cloud.bigquery.table.TableReference` pointing to this table. Returns: google.cloud.bigquery.table.TableReference: pointer to this table. """ from google.cloud.bigquery import dataset dataset_ref = dataset.DatasetReference(table.project, table.dataset_id) return TableReference(dataset_ref, table.table_id)
python
def _reference_getter(table): """A :class:`~google.cloud.bigquery.table.TableReference` pointing to this table. Returns: google.cloud.bigquery.table.TableReference: pointer to this table. """ from google.cloud.bigquery import dataset dataset_ref = dataset.DatasetReference(table.project, table.dataset_id) return TableReference(dataset_ref, table.table_id)
[ "def", "_reference_getter", "(", "table", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", "import", "dataset", "dataset_ref", "=", "dataset", ".", "DatasetReference", "(", "table", ".", "project", ",", "table", ".", "dataset_id", ")", "return", "TableReference", "(", "dataset_ref", ",", "table", ".", "table_id", ")" ]
A :class:`~google.cloud.bigquery.table.TableReference` pointing to this table. Returns: google.cloud.bigquery.table.TableReference: pointer to this table.
[ "A", ":", "class", ":", "~google", ".", "cloud", ".", "bigquery", ".", "table", ".", "TableReference", "pointing", "to", "this", "table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L80-L90
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_view_use_legacy_sql_getter
def _view_use_legacy_sql_getter(table): """bool: Specifies whether to execute the view with Legacy or Standard SQL. This boolean specifies whether to execute the view with Legacy SQL (:data:`True`) or Standard SQL (:data:`False`). The client side default is :data:`False`. The server-side default is :data:`True`. If this table is not a view, :data:`None` is returned. Raises: ValueError: For invalid value types. """ view = table._properties.get("view") if view is not None: # The server-side default for useLegacySql is True. return view.get("useLegacySql", True) # In some cases, such as in a table list no view object is present, but the # resource still represents a view. Use the type as a fallback. if table.table_type == "VIEW": # The server-side default for useLegacySql is True. return True
python
def _view_use_legacy_sql_getter(table): """bool: Specifies whether to execute the view with Legacy or Standard SQL. This boolean specifies whether to execute the view with Legacy SQL (:data:`True`) or Standard SQL (:data:`False`). The client side default is :data:`False`. The server-side default is :data:`True`. If this table is not a view, :data:`None` is returned. Raises: ValueError: For invalid value types. """ view = table._properties.get("view") if view is not None: # The server-side default for useLegacySql is True. return view.get("useLegacySql", True) # In some cases, such as in a table list no view object is present, but the # resource still represents a view. Use the type as a fallback. if table.table_type == "VIEW": # The server-side default for useLegacySql is True. return True
[ "def", "_view_use_legacy_sql_getter", "(", "table", ")", ":", "view", "=", "table", ".", "_properties", ".", "get", "(", "\"view\"", ")", "if", "view", "is", "not", "None", ":", "# The server-side default for useLegacySql is True.", "return", "view", ".", "get", "(", "\"useLegacySql\"", ",", "True", ")", "# In some cases, such as in a table list no view object is present, but the", "# resource still represents a view. Use the type as a fallback.", "if", "table", ".", "table_type", "==", "\"VIEW\"", ":", "# The server-side default for useLegacySql is True.", "return", "True" ]
bool: Specifies whether to execute the view with Legacy or Standard SQL. This boolean specifies whether to execute the view with Legacy SQL (:data:`True`) or Standard SQL (:data:`False`). The client side default is :data:`False`. The server-side default is :data:`True`. If this table is not a view, :data:`None` is returned. Raises: ValueError: For invalid value types.
[ "bool", ":", "Specifies", "whether", "to", "execute", "the", "view", "with", "Legacy", "or", "Standard", "SQL", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L93-L112
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_row_from_mapping
def _row_from_mapping(mapping, schema): """Convert a mapping to a row tuple using the schema. Args: mapping (Dict[str, object]) Mapping of row data: must contain keys for all required fields in the schema. Keys which do not correspond to a field in the schema are ignored. schema (List[google.cloud.bigquery.schema.SchemaField]): The schema of the table destination for the rows Returns: Tuple[object]: Tuple whose elements are ordered according to the schema. Raises: ValueError: If schema is empty. """ if len(schema) == 0: raise ValueError(_TABLE_HAS_NO_SCHEMA) row = [] for field in schema: if field.mode == "REQUIRED": row.append(mapping[field.name]) elif field.mode == "REPEATED": row.append(mapping.get(field.name, ())) elif field.mode == "NULLABLE": row.append(mapping.get(field.name)) else: raise ValueError("Unknown field mode: {}".format(field.mode)) return tuple(row)
python
def _row_from_mapping(mapping, schema): """Convert a mapping to a row tuple using the schema. Args: mapping (Dict[str, object]) Mapping of row data: must contain keys for all required fields in the schema. Keys which do not correspond to a field in the schema are ignored. schema (List[google.cloud.bigquery.schema.SchemaField]): The schema of the table destination for the rows Returns: Tuple[object]: Tuple whose elements are ordered according to the schema. Raises: ValueError: If schema is empty. """ if len(schema) == 0: raise ValueError(_TABLE_HAS_NO_SCHEMA) row = [] for field in schema: if field.mode == "REQUIRED": row.append(mapping[field.name]) elif field.mode == "REPEATED": row.append(mapping.get(field.name, ())) elif field.mode == "NULLABLE": row.append(mapping.get(field.name)) else: raise ValueError("Unknown field mode: {}".format(field.mode)) return tuple(row)
[ "def", "_row_from_mapping", "(", "mapping", ",", "schema", ")", ":", "if", "len", "(", "schema", ")", "==", "0", ":", "raise", "ValueError", "(", "_TABLE_HAS_NO_SCHEMA", ")", "row", "=", "[", "]", "for", "field", "in", "schema", ":", "if", "field", ".", "mode", "==", "\"REQUIRED\"", ":", "row", ".", "append", "(", "mapping", "[", "field", ".", "name", "]", ")", "elif", "field", ".", "mode", "==", "\"REPEATED\"", ":", "row", ".", "append", "(", "mapping", ".", "get", "(", "field", ".", "name", ",", "(", ")", ")", ")", "elif", "field", ".", "mode", "==", "\"NULLABLE\"", ":", "row", ".", "append", "(", "mapping", ".", "get", "(", "field", ".", "name", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown field mode: {}\"", ".", "format", "(", "field", ".", "mode", ")", ")", "return", "tuple", "(", "row", ")" ]
Convert a mapping to a row tuple using the schema. Args: mapping (Dict[str, object]) Mapping of row data: must contain keys for all required fields in the schema. Keys which do not correspond to a field in the schema are ignored. schema (List[google.cloud.bigquery.schema.SchemaField]): The schema of the table destination for the rows Returns: Tuple[object]: Tuple whose elements are ordered according to the schema. Raises: ValueError: If schema is empty.
[ "Convert", "a", "mapping", "to", "a", "row", "tuple", "using", "the", "schema", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1105-L1136
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_item_to_row
def _item_to_row(iterator, resource): """Convert a JSON row to the native object. .. note:: This assumes that the ``schema`` attribute has been added to the iterator after being created, which should be done by the caller. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type resource: dict :param resource: An item to be converted to a row. :rtype: :class:`~google.cloud.bigquery.table.Row` :returns: The next row in the page. """ return Row( _helpers._row_tuple_from_json(resource, iterator.schema), iterator._field_to_index, )
python
def _item_to_row(iterator, resource): """Convert a JSON row to the native object. .. note:: This assumes that the ``schema`` attribute has been added to the iterator after being created, which should be done by the caller. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type resource: dict :param resource: An item to be converted to a row. :rtype: :class:`~google.cloud.bigquery.table.Row` :returns: The next row in the page. """ return Row( _helpers._row_tuple_from_json(resource, iterator.schema), iterator._field_to_index, )
[ "def", "_item_to_row", "(", "iterator", ",", "resource", ")", ":", "return", "Row", "(", "_helpers", ".", "_row_tuple_from_json", "(", "resource", ",", "iterator", ".", "schema", ")", ",", "iterator", ".", "_field_to_index", ",", ")" ]
Convert a JSON row to the native object. .. note:: This assumes that the ``schema`` attribute has been added to the iterator after being created, which should be done by the caller. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type resource: dict :param resource: An item to be converted to a row. :rtype: :class:`~google.cloud.bigquery.table.Row` :returns: The next row in the page.
[ "Convert", "a", "JSON", "row", "to", "the", "native", "object", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1882-L1903
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_rows_page_start
def _rows_page_start(iterator, page, response): """Grab total rows when :class:`~google.cloud.iterator.Page` starts. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type page: :class:`~google.api_core.page_iterator.Page` :param page: The page that was just created. :type response: dict :param response: The JSON API response for a page of rows in a table. """ total_rows = response.get("totalRows") if total_rows is not None: total_rows = int(total_rows) iterator._total_rows = total_rows
python
def _rows_page_start(iterator, page, response): """Grab total rows when :class:`~google.cloud.iterator.Page` starts. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type page: :class:`~google.api_core.page_iterator.Page` :param page: The page that was just created. :type response: dict :param response: The JSON API response for a page of rows in a table. """ total_rows = response.get("totalRows") if total_rows is not None: total_rows = int(total_rows) iterator._total_rows = total_rows
[ "def", "_rows_page_start", "(", "iterator", ",", "page", ",", "response", ")", ":", "total_rows", "=", "response", ".", "get", "(", "\"totalRows\"", ")", "if", "total_rows", "is", "not", "None", ":", "total_rows", "=", "int", "(", "total_rows", ")", "iterator", ".", "_total_rows", "=", "total_rows" ]
Grab total rows when :class:`~google.cloud.iterator.Page` starts. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type page: :class:`~google.api_core.page_iterator.Page` :param page: The page that was just created. :type response: dict :param response: The JSON API response for a page of rows in a table.
[ "Grab", "total", "rows", "when", ":", "class", ":", "~google", ".", "cloud", ".", "iterator", ".", "Page", "starts", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1907-L1922
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_table_arg_to_table_ref
def _table_arg_to_table_ref(value, default_project=None): """Helper to convert a string or Table to TableReference. This function keeps TableReference and other kinds of objects unchanged. """ if isinstance(value, six.string_types): value = TableReference.from_string(value, default_project=default_project) if isinstance(value, (Table, TableListItem)): value = value.reference return value
python
def _table_arg_to_table_ref(value, default_project=None): """Helper to convert a string or Table to TableReference. This function keeps TableReference and other kinds of objects unchanged. """ if isinstance(value, six.string_types): value = TableReference.from_string(value, default_project=default_project) if isinstance(value, (Table, TableListItem)): value = value.reference return value
[ "def", "_table_arg_to_table_ref", "(", "value", ",", "default_project", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "value", "=", "TableReference", ".", "from_string", "(", "value", ",", "default_project", "=", "default_project", ")", "if", "isinstance", "(", "value", ",", "(", "Table", ",", "TableListItem", ")", ")", ":", "value", "=", "value", ".", "reference", "return", "value" ]
Helper to convert a string or Table to TableReference. This function keeps TableReference and other kinds of objects unchanged.
[ "Helper", "to", "convert", "a", "string", "or", "Table", "to", "TableReference", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1928-L1937
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_table_arg_to_table
def _table_arg_to_table(value, default_project=None): """Helper to convert a string or TableReference to a Table. This function keeps Table and other kinds of objects unchanged. """ if isinstance(value, six.string_types): value = TableReference.from_string(value, default_project=default_project) if isinstance(value, TableReference): value = Table(value) if isinstance(value, TableListItem): newvalue = Table(value.reference) newvalue._properties = value._properties value = newvalue return value
python
def _table_arg_to_table(value, default_project=None): """Helper to convert a string or TableReference to a Table. This function keeps Table and other kinds of objects unchanged. """ if isinstance(value, six.string_types): value = TableReference.from_string(value, default_project=default_project) if isinstance(value, TableReference): value = Table(value) if isinstance(value, TableListItem): newvalue = Table(value.reference) newvalue._properties = value._properties value = newvalue return value
[ "def", "_table_arg_to_table", "(", "value", ",", "default_project", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "value", "=", "TableReference", ".", "from_string", "(", "value", ",", "default_project", "=", "default_project", ")", "if", "isinstance", "(", "value", ",", "TableReference", ")", ":", "value", "=", "Table", "(", "value", ")", "if", "isinstance", "(", "value", ",", "TableListItem", ")", ":", "newvalue", "=", "Table", "(", "value", ".", "reference", ")", "newvalue", ".", "_properties", "=", "value", ".", "_properties", "value", "=", "newvalue", "return", "value" ]
Helper to convert a string or TableReference to a Table. This function keeps Table and other kinds of objects unchanged.
[ "Helper", "to", "convert", "a", "string", "or", "TableReference", "to", "a", "Table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1940-L1954
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
TableReference.from_string
def from_string(cls, table_id, default_project=None): """Construct a table reference from table ID string. Args: table_id (str): A table ID in standard SQL format. If ``default_project`` is not specified, this must included a project ID, dataset ID, and table ID, each separated by ``.``. default_project (str): Optional. The project ID to use when ``table_id`` does not include a project ID. Returns: TableReference: Table reference parsed from ``table_id``. Examples: >>> TableReference.from_string('my-project.mydataset.mytable') TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable') Raises: ValueError: If ``table_id`` is not a fully-qualified table ID in standard SQL format. """ from google.cloud.bigquery.dataset import DatasetReference ( output_project_id, output_dataset_id, output_table_id, ) = _helpers._parse_3_part_id( table_id, default_project=default_project, property_name="table_id" ) return cls( DatasetReference(output_project_id, output_dataset_id), output_table_id )
python
def from_string(cls, table_id, default_project=None): """Construct a table reference from table ID string. Args: table_id (str): A table ID in standard SQL format. If ``default_project`` is not specified, this must included a project ID, dataset ID, and table ID, each separated by ``.``. default_project (str): Optional. The project ID to use when ``table_id`` does not include a project ID. Returns: TableReference: Table reference parsed from ``table_id``. Examples: >>> TableReference.from_string('my-project.mydataset.mytable') TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable') Raises: ValueError: If ``table_id`` is not a fully-qualified table ID in standard SQL format. """ from google.cloud.bigquery.dataset import DatasetReference ( output_project_id, output_dataset_id, output_table_id, ) = _helpers._parse_3_part_id( table_id, default_project=default_project, property_name="table_id" ) return cls( DatasetReference(output_project_id, output_dataset_id), output_table_id )
[ "def", "from_string", "(", "cls", ",", "table_id", ",", "default_project", "=", "None", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", ".", "dataset", "import", "DatasetReference", "(", "output_project_id", ",", "output_dataset_id", ",", "output_table_id", ",", ")", "=", "_helpers", ".", "_parse_3_part_id", "(", "table_id", ",", "default_project", "=", "default_project", ",", "property_name", "=", "\"table_id\"", ")", "return", "cls", "(", "DatasetReference", "(", "output_project_id", ",", "output_dataset_id", ")", ",", "output_table_id", ")" ]
Construct a table reference from table ID string. Args: table_id (str): A table ID in standard SQL format. If ``default_project`` is not specified, this must included a project ID, dataset ID, and table ID, each separated by ``.``. default_project (str): Optional. The project ID to use when ``table_id`` does not include a project ID. Returns: TableReference: Table reference parsed from ``table_id``. Examples: >>> TableReference.from_string('my-project.mydataset.mytable') TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable') Raises: ValueError: If ``table_id`` is not a fully-qualified table ID in standard SQL format.
[ "Construct", "a", "table", "reference", "from", "table", "ID", "string", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L224-L260
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
TableReference.from_api_repr
def from_api_repr(cls, resource): """Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``. """ from google.cloud.bigquery.dataset import DatasetReference project = resource["projectId"] dataset_id = resource["datasetId"] table_id = resource["tableId"] return cls(DatasetReference(project, dataset_id), table_id)
python
def from_api_repr(cls, resource): """Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``. """ from google.cloud.bigquery.dataset import DatasetReference project = resource["projectId"] dataset_id = resource["datasetId"] table_id = resource["tableId"] return cls(DatasetReference(project, dataset_id), table_id)
[ "def", "from_api_repr", "(", "cls", ",", "resource", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", ".", "dataset", "import", "DatasetReference", "project", "=", "resource", "[", "\"projectId\"", "]", "dataset_id", "=", "resource", "[", "\"datasetId\"", "]", "table_id", "=", "resource", "[", "\"tableId\"", "]", "return", "cls", "(", "DatasetReference", "(", "project", ",", "dataset_id", ")", ",", "table_id", ")" ]
Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``.
[ "Factory", ":", "construct", "a", "table", "reference", "given", "its", "API", "representation" ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L263-L279
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
TableReference.to_bqstorage
def to_bqstorage(self): """Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported. """ if bigquery_storage_v1beta1 is None: raise ValueError(_NO_BQSTORAGE_ERROR) table_ref = bigquery_storage_v1beta1.types.TableReference() table_ref.project_id = self._project table_ref.dataset_id = self._dataset_id table_id = self._table_id if "@" in table_id: table_id = table_id.split("@")[0] if "$" in table_id: table_id = table_id.split("$")[0] table_ref.table_id = table_id return table_ref
python
def to_bqstorage(self): """Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported. """ if bigquery_storage_v1beta1 is None: raise ValueError(_NO_BQSTORAGE_ERROR) table_ref = bigquery_storage_v1beta1.types.TableReference() table_ref.project_id = self._project table_ref.dataset_id = self._dataset_id table_id = self._table_id if "@" in table_id: table_id = table_id.split("@")[0] if "$" in table_id: table_id = table_id.split("$")[0] table_ref.table_id = table_id return table_ref
[ "def", "to_bqstorage", "(", "self", ")", ":", "if", "bigquery_storage_v1beta1", "is", "None", ":", "raise", "ValueError", "(", "_NO_BQSTORAGE_ERROR", ")", "table_ref", "=", "bigquery_storage_v1beta1", ".", "types", ".", "TableReference", "(", ")", "table_ref", ".", "project_id", "=", "self", ".", "_project", "table_ref", ".", "dataset_id", "=", "self", ".", "_dataset_id", "table_id", "=", "self", ".", "_table_id", "if", "\"@\"", "in", "table_id", ":", "table_id", "=", "table_id", ".", "split", "(", "\"@\"", ")", "[", "0", "]", "if", "\"$\"", "in", "table_id", ":", "table_id", "=", "table_id", ".", "split", "(", "\"$\"", ")", "[", "0", "]", "table_ref", ".", "table_id", "=", "table_id", "return", "table_ref" ]
Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported.
[ "Construct", "a", "BigQuery", "Storage", "API", "representation", "of", "this", "table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L293-L332
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.encryption_configuration
def encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See `protecting data with Cloud KMS keys <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_ in the BigQuery documentation. """ prop = self._properties.get("encryptionConfiguration") if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop
python
def encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See `protecting data with Cloud KMS keys <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_ in the BigQuery documentation. """ prop = self._properties.get("encryptionConfiguration") if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop
[ "def", "encryption_configuration", "(", "self", ")", ":", "prop", "=", "self", ".", "_properties", ".", "get", "(", "\"encryptionConfiguration\"", ")", "if", "prop", "is", "not", "None", ":", "prop", "=", "EncryptionConfiguration", ".", "from_api_repr", "(", "prop", ")", "return", "prop" ]
google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See `protecting data with Cloud KMS keys <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_ in the BigQuery documentation.
[ "google", ".", "cloud", ".", "bigquery", ".", "table", ".", "EncryptionConfiguration", ":", "Custom", "encryption", "configuration", "for", "the", "table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L471-L485
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.time_partitioning
def time_partitioning(self): """google.cloud.bigquery.table.TimePartitioning: Configures time-based partitioning for a table. Raises: ValueError: If the value is not :class:`TimePartitioning` or :data:`None`. """ prop = self._properties.get("timePartitioning") if prop is not None: return TimePartitioning.from_api_repr(prop)
python
def time_partitioning(self): """google.cloud.bigquery.table.TimePartitioning: Configures time-based partitioning for a table. Raises: ValueError: If the value is not :class:`TimePartitioning` or :data:`None`. """ prop = self._properties.get("timePartitioning") if prop is not None: return TimePartitioning.from_api_repr(prop)
[ "def", "time_partitioning", "(", "self", ")", ":", "prop", "=", "self", ".", "_properties", ".", "get", "(", "\"timePartitioning\"", ")", "if", "prop", "is", "not", "None", ":", "return", "TimePartitioning", ".", "from_api_repr", "(", "prop", ")" ]
google.cloud.bigquery.table.TimePartitioning: Configures time-based partitioning for a table. Raises: ValueError: If the value is not :class:`TimePartitioning` or :data:`None`.
[ "google", ".", "cloud", ".", "bigquery", ".", "table", ".", "TimePartitioning", ":", "Configures", "time", "-", "based", "partitioning", "for", "a", "table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L565-L575
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.partition_expiration
def partition_expiration(self): """Union[int, None]: Expiration time in milliseconds for a partition. If :attr:`partition_expiration` is set and :attr:`type_` is not set, :attr:`type_` will default to :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`. """ warnings.warn( "This method will be deprecated in future versions. Please use " "Table.time_partitioning.expiration_ms instead.", PendingDeprecationWarning, stacklevel=2, ) if self.time_partitioning is not None: return self.time_partitioning.expiration_ms
python
def partition_expiration(self): """Union[int, None]: Expiration time in milliseconds for a partition. If :attr:`partition_expiration` is set and :attr:`type_` is not set, :attr:`type_` will default to :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`. """ warnings.warn( "This method will be deprecated in future versions. Please use " "Table.time_partitioning.expiration_ms instead.", PendingDeprecationWarning, stacklevel=2, ) if self.time_partitioning is not None: return self.time_partitioning.expiration_ms
[ "def", "partition_expiration", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"This method will be deprecated in future versions. Please use \"", "\"Table.time_partitioning.expiration_ms instead.\"", ",", "PendingDeprecationWarning", ",", "stacklevel", "=", "2", ",", ")", "if", "self", ".", "time_partitioning", "is", "not", "None", ":", "return", "self", ".", "time_partitioning", ".", "expiration_ms" ]
Union[int, None]: Expiration time in milliseconds for a partition. If :attr:`partition_expiration` is set and :attr:`type_` is not set, :attr:`type_` will default to :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
[ "Union", "[", "int", "None", "]", ":", "Expiration", "time", "in", "milliseconds", "for", "a", "partition", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L618-L632
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.clustering_fields
def clustering_fields(self): """Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). Clustering fields are immutable after table creation. .. note:: As of 2018-06-29, clustering fields cannot be set on a table which does not also have time partioning defined. """ prop = self._properties.get("clustering") if prop is not None: return list(prop.get("fields", ()))
python
def clustering_fields(self): """Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). Clustering fields are immutable after table creation. .. note:: As of 2018-06-29, clustering fields cannot be set on a table which does not also have time partioning defined. """ prop = self._properties.get("clustering") if prop is not None: return list(prop.get("fields", ()))
[ "def", "clustering_fields", "(", "self", ")", ":", "prop", "=", "self", ".", "_properties", ".", "get", "(", "\"clustering\"", ")", "if", "prop", "is", "not", "None", ":", "return", "list", "(", "prop", ".", "get", "(", "\"fields\"", ",", "(", ")", ")", ")" ]
Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). Clustering fields are immutable after table creation. .. note:: As of 2018-06-29, clustering fields cannot be set on a table which does not also have time partioning defined.
[ "Union", "[", "List", "[", "str", "]", "None", "]", ":", "Fields", "defining", "clustering", "for", "the", "table" ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L647-L661
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.clustering_fields
def clustering_fields(self, value): """Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). """ if value is not None: prop = self._properties.setdefault("clustering", {}) prop["fields"] = value else: if "clustering" in self._properties: del self._properties["clustering"]
python
def clustering_fields(self, value): """Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). """ if value is not None: prop = self._properties.setdefault("clustering", {}) prop["fields"] = value else: if "clustering" in self._properties: del self._properties["clustering"]
[ "def", "clustering_fields", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "prop", "=", "self", ".", "_properties", ".", "setdefault", "(", "\"clustering\"", ",", "{", "}", ")", "prop", "[", "\"fields\"", "]", "=", "value", "else", ":", "if", "\"clustering\"", "in", "self", ".", "_properties", ":", "del", "self", ".", "_properties", "[", "\"clustering\"", "]" ]
Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`).
[ "Union", "[", "List", "[", "str", "]", "None", "]", ":", "Fields", "defining", "clustering", "for", "the", "table" ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L664-L674
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.expires
def expires(self): """Union[datetime.datetime, None]: Datetime at which the table will be deleted. Raises: ValueError: For invalid value types. """ expiration_time = self._properties.get("expirationTime") if expiration_time is not None: # expiration_time will be in milliseconds. return google.cloud._helpers._datetime_from_microseconds( 1000.0 * float(expiration_time) )
python
def expires(self): """Union[datetime.datetime, None]: Datetime at which the table will be deleted. Raises: ValueError: For invalid value types. """ expiration_time = self._properties.get("expirationTime") if expiration_time is not None: # expiration_time will be in milliseconds. return google.cloud._helpers._datetime_from_microseconds( 1000.0 * float(expiration_time) )
[ "def", "expires", "(", "self", ")", ":", "expiration_time", "=", "self", ".", "_properties", ".", "get", "(", "\"expirationTime\"", ")", "if", "expiration_time", "is", "not", "None", ":", "# expiration_time will be in milliseconds.", "return", "google", ".", "cloud", ".", "_helpers", ".", "_datetime_from_microseconds", "(", "1000.0", "*", "float", "(", "expiration_time", ")", ")" ]
Union[datetime.datetime, None]: Datetime at which the table will be deleted. Raises: ValueError: For invalid value types.
[ "Union", "[", "datetime", ".", "datetime", "None", "]", ":", "Datetime", "at", "which", "the", "table", "will", "be", "deleted", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L693-L705
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.external_data_configuration
def external_data_configuration(self): """Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for an external data source (defaults to :data:`None`). Raises: ValueError: For invalid value types. """ prop = self._properties.get("externalDataConfiguration") if prop is not None: prop = ExternalConfig.from_api_repr(prop) return prop
python
def external_data_configuration(self): """Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for an external data source (defaults to :data:`None`). Raises: ValueError: For invalid value types. """ prop = self._properties.get("externalDataConfiguration") if prop is not None: prop = ExternalConfig.from_api_repr(prop) return prop
[ "def", "external_data_configuration", "(", "self", ")", ":", "prop", "=", "self", ".", "_properties", ".", "get", "(", "\"externalDataConfiguration\"", ")", "if", "prop", "is", "not", "None", ":", "prop", "=", "ExternalConfig", ".", "from_api_repr", "(", "prop", ")", "return", "prop" ]
Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for an external data source (defaults to :data:`None`). Raises: ValueError: For invalid value types.
[ "Union", "[", "google", ".", "cloud", ".", "bigquery", ".", "ExternalConfig", "None", "]", ":", "Configuration", "for", "an", "external", "data", "source", "(", "defaults", "to", ":", "data", ":", "None", ")", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L790-L800
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Table.from_api_repr
def from_api_repr(cls, resource): """Factory: construct a table given its API representation Args: resource (Dict[str, object]): Table resource representation from the API Returns: google.cloud.bigquery.table.Table: Table parsed from ``resource``. Raises: KeyError: If the ``resource`` lacks the key ``'tableReference'``, or if the ``dict`` stored within the key ``'tableReference'`` lacks the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``. """ from google.cloud.bigquery import dataset if ( "tableReference" not in resource or "tableId" not in resource["tableReference"] ): raise KeyError( "Resource lacks required identity information:" '["tableReference"]["tableId"]' ) project_id = resource["tableReference"]["projectId"] table_id = resource["tableReference"]["tableId"] dataset_id = resource["tableReference"]["datasetId"] dataset_ref = dataset.DatasetReference(project_id, dataset_id) table = cls(dataset_ref.table(table_id)) table._properties = resource return table
python
def from_api_repr(cls, resource): """Factory: construct a table given its API representation Args: resource (Dict[str, object]): Table resource representation from the API Returns: google.cloud.bigquery.table.Table: Table parsed from ``resource``. Raises: KeyError: If the ``resource`` lacks the key ``'tableReference'``, or if the ``dict`` stored within the key ``'tableReference'`` lacks the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``. """ from google.cloud.bigquery import dataset if ( "tableReference" not in resource or "tableId" not in resource["tableReference"] ): raise KeyError( "Resource lacks required identity information:" '["tableReference"]["tableId"]' ) project_id = resource["tableReference"]["projectId"] table_id = resource["tableReference"]["tableId"] dataset_id = resource["tableReference"]["datasetId"] dataset_ref = dataset.DatasetReference(project_id, dataset_id) table = cls(dataset_ref.table(table_id)) table._properties = resource return table
[ "def", "from_api_repr", "(", "cls", ",", "resource", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", "import", "dataset", "if", "(", "\"tableReference\"", "not", "in", "resource", "or", "\"tableId\"", "not", "in", "resource", "[", "\"tableReference\"", "]", ")", ":", "raise", "KeyError", "(", "\"Resource lacks required identity information:\"", "'[\"tableReference\"][\"tableId\"]'", ")", "project_id", "=", "resource", "[", "\"tableReference\"", "]", "[", "\"projectId\"", "]", "table_id", "=", "resource", "[", "\"tableReference\"", "]", "[", "\"tableId\"", "]", "dataset_id", "=", "resource", "[", "\"tableReference\"", "]", "[", "\"datasetId\"", "]", "dataset_ref", "=", "dataset", ".", "DatasetReference", "(", "project_id", ",", "dataset_id", ")", "table", "=", "cls", "(", "dataset_ref", ".", "table", "(", "table_id", ")", ")", "table", ".", "_properties", "=", "resource", "return", "table" ]
Factory: construct a table given its API representation Args: resource (Dict[str, object]): Table resource representation from the API Returns: google.cloud.bigquery.table.Table: Table parsed from ``resource``. Raises: KeyError: If the ``resource`` lacks the key ``'tableReference'``, or if the ``dict`` stored within the key ``'tableReference'`` lacks the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``.
[ "Factory", ":", "construct", "a", "table", "given", "its", "API", "representation" ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L836-L870
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
TableListItem.partitioning_type
def partitioning_type(self): """Union[str, None]: Time partitioning of the table if it is partitioned (Defaults to :data:`None`). """ warnings.warn( "This method will be deprecated in future versions. Please use " "TableListItem.time_partitioning.type_ instead.", PendingDeprecationWarning, stacklevel=2, ) if self.time_partitioning is not None: return self.time_partitioning.type_
python
def partitioning_type(self): """Union[str, None]: Time partitioning of the table if it is partitioned (Defaults to :data:`None`). """ warnings.warn( "This method will be deprecated in future versions. Please use " "TableListItem.time_partitioning.type_ instead.", PendingDeprecationWarning, stacklevel=2, ) if self.time_partitioning is not None: return self.time_partitioning.type_
[ "def", "partitioning_type", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"This method will be deprecated in future versions. Please use \"", "\"TableListItem.time_partitioning.type_ instead.\"", ",", "PendingDeprecationWarning", ",", "stacklevel", "=", "2", ",", ")", "if", "self", ".", "time_partitioning", "is", "not", "None", ":", "return", "self", ".", "time_partitioning", ".", "type_" ]
Union[str, None]: Time partitioning of the table if it is partitioned (Defaults to :data:`None`).
[ "Union", "[", "str", "None", "]", ":", "Time", "partitioning", "of", "the", "table", "if", "it", "is", "partitioned", "(", "Defaults", "to", ":", "data", ":", "None", ")", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1016-L1027
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Row.items
def items(self): """Return items as ``(key, value)`` pairs. Returns: Iterable[Tuple[str, object]]: The ``(key, value)`` pairs representing this row. Examples: >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items()) [('x', 'a'), ('y', 'b')] """ for key, index in six.iteritems(self._xxx_field_to_index): yield (key, copy.deepcopy(self._xxx_values[index]))
python
def items(self): """Return items as ``(key, value)`` pairs. Returns: Iterable[Tuple[str, object]]: The ``(key, value)`` pairs representing this row. Examples: >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items()) [('x', 'a'), ('y', 'b')] """ for key, index in six.iteritems(self._xxx_field_to_index): yield (key, copy.deepcopy(self._xxx_values[index]))
[ "def", "items", "(", "self", ")", ":", "for", "key", ",", "index", "in", "six", ".", "iteritems", "(", "self", ".", "_xxx_field_to_index", ")", ":", "yield", "(", "key", ",", "copy", ".", "deepcopy", "(", "self", ".", "_xxx_values", "[", "index", "]", ")", ")" ]
Return items as ``(key, value)`` pairs. Returns: Iterable[Tuple[str, object]]: The ``(key, value)`` pairs representing this row. Examples: >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items()) [('x', 'a'), ('y', 'b')]
[ "Return", "items", "as", "(", "key", "value", ")", "pairs", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1198-L1211
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
Row.get
def get(self, key, default=None): """Return a value for key, with a default value if it does not exist. Args: key (str): The key of the column to access default (object): The default value to use if the key does not exist. (Defaults to :data:`None`.) Returns: object: The value associated with the provided key, or a default value. Examples: When the key exists, the value associated with it is returned. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x') 'a' The default value is :data:`None` when the key does not exist. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z') None The default value can be overrided with the ``default`` parameter. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '') '' >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '') '' """ index = self._xxx_field_to_index.get(key) if index is None: return default return self._xxx_values[index]
python
def get(self, key, default=None): """Return a value for key, with a default value if it does not exist. Args: key (str): The key of the column to access default (object): The default value to use if the key does not exist. (Defaults to :data:`None`.) Returns: object: The value associated with the provided key, or a default value. Examples: When the key exists, the value associated with it is returned. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x') 'a' The default value is :data:`None` when the key does not exist. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z') None The default value can be overrided with the ``default`` parameter. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '') '' >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '') '' """ index = self._xxx_field_to_index.get(key) if index is None: return default return self._xxx_values[index]
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "index", "=", "self", ".", "_xxx_field_to_index", ".", "get", "(", "key", ")", "if", "index", "is", "None", ":", "return", "default", "return", "self", ".", "_xxx_values", "[", "index", "]" ]
Return a value for key, with a default value if it does not exist. Args: key (str): The key of the column to access default (object): The default value to use if the key does not exist. (Defaults to :data:`None`.) Returns: object: The value associated with the provided key, or a default value. Examples: When the key exists, the value associated with it is returned. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x') 'a' The default value is :data:`None` when the key does not exist. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z') None The default value can be overrided with the ``default`` parameter. >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '') '' >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '') ''
[ "Return", "a", "value", "for", "key", "with", "a", "default", "value", "if", "it", "does", "not", "exist", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1213-L1248
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
RowIterator._get_next_page_response
def _get_next_page_response(self): """Requests the next page from the path provided. Returns: Dict[str, object]: The parsed JSON response of the next page's contents. """ params = self._get_query_params() if self._page_size is not None: params["maxResults"] = self._page_size return self.api_request( method=self._HTTP_METHOD, path=self.path, query_params=params )
python
def _get_next_page_response(self): """Requests the next page from the path provided. Returns: Dict[str, object]: The parsed JSON response of the next page's contents. """ params = self._get_query_params() if self._page_size is not None: params["maxResults"] = self._page_size return self.api_request( method=self._HTTP_METHOD, path=self.path, query_params=params )
[ "def", "_get_next_page_response", "(", "self", ")", ":", "params", "=", "self", ".", "_get_query_params", "(", ")", "if", "self", ".", "_page_size", "is", "not", "None", ":", "params", "[", "\"maxResults\"", "]", "=", "self", ".", "_page_size", "return", "self", ".", "api_request", "(", "method", "=", "self", ".", "_HTTP_METHOD", ",", "path", "=", "self", ".", "path", ",", "query_params", "=", "params", ")" ]
Requests the next page from the path provided. Returns: Dict[str, object]: The parsed JSON response of the next page's contents.
[ "Requests", "the", "next", "page", "from", "the", "path", "provided", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1357-L1369
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
RowIterator._to_dataframe_tabledata_list
def _to_dataframe_tabledata_list(self, dtypes, progress_bar=None): """Use (slower, but free) tabledata.list to construct a DataFrame.""" column_names = [field.name for field in self.schema] frames = [] for page in iter(self.pages): current_frame = self._to_dataframe_dtypes(page, column_names, dtypes) frames.append(current_frame) if progress_bar is not None: # In some cases, the number of total rows is not populated # until the first page of rows is fetched. Update the # progress bar's total to keep an accurate count. progress_bar.total = progress_bar.total or self.total_rows progress_bar.update(len(current_frame)) if progress_bar is not None: # Indicate that the download has finished. progress_bar.close() return pandas.concat(frames)
python
def _to_dataframe_tabledata_list(self, dtypes, progress_bar=None): """Use (slower, but free) tabledata.list to construct a DataFrame.""" column_names = [field.name for field in self.schema] frames = [] for page in iter(self.pages): current_frame = self._to_dataframe_dtypes(page, column_names, dtypes) frames.append(current_frame) if progress_bar is not None: # In some cases, the number of total rows is not populated # until the first page of rows is fetched. Update the # progress bar's total to keep an accurate count. progress_bar.total = progress_bar.total or self.total_rows progress_bar.update(len(current_frame)) if progress_bar is not None: # Indicate that the download has finished. progress_bar.close() return pandas.concat(frames)
[ "def", "_to_dataframe_tabledata_list", "(", "self", ",", "dtypes", ",", "progress_bar", "=", "None", ")", ":", "column_names", "=", "[", "field", ".", "name", "for", "field", "in", "self", ".", "schema", "]", "frames", "=", "[", "]", "for", "page", "in", "iter", "(", "self", ".", "pages", ")", ":", "current_frame", "=", "self", ".", "_to_dataframe_dtypes", "(", "page", ",", "column_names", ",", "dtypes", ")", "frames", ".", "append", "(", "current_frame", ")", "if", "progress_bar", "is", "not", "None", ":", "# In some cases, the number of total rows is not populated", "# until the first page of rows is fetched. Update the", "# progress bar's total to keep an accurate count.", "progress_bar", ".", "total", "=", "progress_bar", ".", "total", "or", "self", ".", "total_rows", "progress_bar", ".", "update", "(", "len", "(", "current_frame", ")", ")", "if", "progress_bar", "is", "not", "None", ":", "# Indicate that the download has finished.", "progress_bar", ".", "close", "(", ")", "return", "pandas", ".", "concat", "(", "frames", ")" ]
Use (slower, but free) tabledata.list to construct a DataFrame.
[ "Use", "(", "slower", "but", "free", ")", "tabledata", ".", "list", "to", "construct", "a", "DataFrame", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1390-L1410
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
RowIterator._to_dataframe_bqstorage
def _to_dataframe_bqstorage(self, bqstorage_client, dtypes, progress_bar=None): """Use (faster, but billable) BQ Storage API to construct DataFrame.""" if bigquery_storage_v1beta1 is None: raise ValueError(_NO_BQSTORAGE_ERROR) if "$" in self._table.table_id: raise ValueError( "Reading from a specific partition is not currently supported." ) if "@" in self._table.table_id: raise ValueError( "Reading from a specific snapshot is not currently supported." ) read_options = bigquery_storage_v1beta1.types.TableReadOptions() if self._selected_fields is not None: for field in self._selected_fields: read_options.selected_fields.append(field.name) session = bqstorage_client.create_read_session( self._table.to_bqstorage(), "projects/{}".format(self._project), read_options=read_options, ) # We need to parse the schema manually so that we can rearrange the # columns. schema = json.loads(session.avro_schema.schema) columns = [field["name"] for field in schema["fields"]] # Avoid reading rows from an empty table. pandas.concat will fail on an # empty list. if not session.streams: return pandas.DataFrame(columns=columns) # Use _to_dataframe_finished to notify worker threads when to quit. # See: https://stackoverflow.com/a/29237343/101923 self._to_dataframe_finished = False # Create a queue to track progress updates across threads. worker_queue = _NoopProgressBarQueue() progress_queue = None progress_thread = None if progress_bar is not None: worker_queue = queue.Queue() progress_queue = queue.Queue() progress_thread = threading.Thread( target=self._process_worker_updates, args=(worker_queue, progress_queue) ) progress_thread.start() def get_frames(pool): frames = [] # Manually submit jobs and wait for download to complete rather # than using pool.map because pool.map continues running in the # background even if there is an exception on the main thread. 
# See: https://github.com/googleapis/google-cloud-python/pull/7698 not_done = [ pool.submit( self._to_dataframe_bqstorage_stream, bqstorage_client, dtypes, columns, session, stream, worker_queue, ) for stream in session.streams ] while not_done: done, not_done = concurrent.futures.wait( not_done, timeout=_PROGRESS_INTERVAL ) frames.extend([future.result() for future in done]) # The progress bar needs to update on the main thread to avoid # contention over stdout / stderr. self._process_progress_updates(progress_queue, progress_bar) return frames with concurrent.futures.ThreadPoolExecutor() as pool: try: frames = get_frames(pool) finally: # No need for a lock because reading/replacing a variable is # defined to be an atomic operation in the Python language # definition (enforced by the global interpreter lock). self._to_dataframe_finished = True # Shutdown all background threads, now that they should know to # exit early. pool.shutdown(wait=True) if progress_thread is not None: progress_thread.join() # Update the progress bar one last time to close it. self._process_progress_updates(progress_queue, progress_bar) return pandas.concat(frames)
python
def _to_dataframe_bqstorage(self, bqstorage_client, dtypes, progress_bar=None): """Use (faster, but billable) BQ Storage API to construct DataFrame.""" if bigquery_storage_v1beta1 is None: raise ValueError(_NO_BQSTORAGE_ERROR) if "$" in self._table.table_id: raise ValueError( "Reading from a specific partition is not currently supported." ) if "@" in self._table.table_id: raise ValueError( "Reading from a specific snapshot is not currently supported." ) read_options = bigquery_storage_v1beta1.types.TableReadOptions() if self._selected_fields is not None: for field in self._selected_fields: read_options.selected_fields.append(field.name) session = bqstorage_client.create_read_session( self._table.to_bqstorage(), "projects/{}".format(self._project), read_options=read_options, ) # We need to parse the schema manually so that we can rearrange the # columns. schema = json.loads(session.avro_schema.schema) columns = [field["name"] for field in schema["fields"]] # Avoid reading rows from an empty table. pandas.concat will fail on an # empty list. if not session.streams: return pandas.DataFrame(columns=columns) # Use _to_dataframe_finished to notify worker threads when to quit. # See: https://stackoverflow.com/a/29237343/101923 self._to_dataframe_finished = False # Create a queue to track progress updates across threads. worker_queue = _NoopProgressBarQueue() progress_queue = None progress_thread = None if progress_bar is not None: worker_queue = queue.Queue() progress_queue = queue.Queue() progress_thread = threading.Thread( target=self._process_worker_updates, args=(worker_queue, progress_queue) ) progress_thread.start() def get_frames(pool): frames = [] # Manually submit jobs and wait for download to complete rather # than using pool.map because pool.map continues running in the # background even if there is an exception on the main thread. 
# See: https://github.com/googleapis/google-cloud-python/pull/7698 not_done = [ pool.submit( self._to_dataframe_bqstorage_stream, bqstorage_client, dtypes, columns, session, stream, worker_queue, ) for stream in session.streams ] while not_done: done, not_done = concurrent.futures.wait( not_done, timeout=_PROGRESS_INTERVAL ) frames.extend([future.result() for future in done]) # The progress bar needs to update on the main thread to avoid # contention over stdout / stderr. self._process_progress_updates(progress_queue, progress_bar) return frames with concurrent.futures.ThreadPoolExecutor() as pool: try: frames = get_frames(pool) finally: # No need for a lock because reading/replacing a variable is # defined to be an atomic operation in the Python language # definition (enforced by the global interpreter lock). self._to_dataframe_finished = True # Shutdown all background threads, now that they should know to # exit early. pool.shutdown(wait=True) if progress_thread is not None: progress_thread.join() # Update the progress bar one last time to close it. self._process_progress_updates(progress_queue, progress_bar) return pandas.concat(frames)
[ "def", "_to_dataframe_bqstorage", "(", "self", ",", "bqstorage_client", ",", "dtypes", ",", "progress_bar", "=", "None", ")", ":", "if", "bigquery_storage_v1beta1", "is", "None", ":", "raise", "ValueError", "(", "_NO_BQSTORAGE_ERROR", ")", "if", "\"$\"", "in", "self", ".", "_table", ".", "table_id", ":", "raise", "ValueError", "(", "\"Reading from a specific partition is not currently supported.\"", ")", "if", "\"@\"", "in", "self", ".", "_table", ".", "table_id", ":", "raise", "ValueError", "(", "\"Reading from a specific snapshot is not currently supported.\"", ")", "read_options", "=", "bigquery_storage_v1beta1", ".", "types", ".", "TableReadOptions", "(", ")", "if", "self", ".", "_selected_fields", "is", "not", "None", ":", "for", "field", "in", "self", ".", "_selected_fields", ":", "read_options", ".", "selected_fields", ".", "append", "(", "field", ".", "name", ")", "session", "=", "bqstorage_client", ".", "create_read_session", "(", "self", ".", "_table", ".", "to_bqstorage", "(", ")", ",", "\"projects/{}\"", ".", "format", "(", "self", ".", "_project", ")", ",", "read_options", "=", "read_options", ",", ")", "# We need to parse the schema manually so that we can rearrange the", "# columns.", "schema", "=", "json", ".", "loads", "(", "session", ".", "avro_schema", ".", "schema", ")", "columns", "=", "[", "field", "[", "\"name\"", "]", "for", "field", "in", "schema", "[", "\"fields\"", "]", "]", "# Avoid reading rows from an empty table. 
pandas.concat will fail on an", "# empty list.", "if", "not", "session", ".", "streams", ":", "return", "pandas", ".", "DataFrame", "(", "columns", "=", "columns", ")", "# Use _to_dataframe_finished to notify worker threads when to quit.", "# See: https://stackoverflow.com/a/29237343/101923", "self", ".", "_to_dataframe_finished", "=", "False", "# Create a queue to track progress updates across threads.", "worker_queue", "=", "_NoopProgressBarQueue", "(", ")", "progress_queue", "=", "None", "progress_thread", "=", "None", "if", "progress_bar", "is", "not", "None", ":", "worker_queue", "=", "queue", ".", "Queue", "(", ")", "progress_queue", "=", "queue", ".", "Queue", "(", ")", "progress_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_process_worker_updates", ",", "args", "=", "(", "worker_queue", ",", "progress_queue", ")", ")", "progress_thread", ".", "start", "(", ")", "def", "get_frames", "(", "pool", ")", ":", "frames", "=", "[", "]", "# Manually submit jobs and wait for download to complete rather", "# than using pool.map because pool.map continues running in the", "# background even if there is an exception on the main thread.", "# See: https://github.com/googleapis/google-cloud-python/pull/7698", "not_done", "=", "[", "pool", ".", "submit", "(", "self", ".", "_to_dataframe_bqstorage_stream", ",", "bqstorage_client", ",", "dtypes", ",", "columns", ",", "session", ",", "stream", ",", "worker_queue", ",", ")", "for", "stream", "in", "session", ".", "streams", "]", "while", "not_done", ":", "done", ",", "not_done", "=", "concurrent", ".", "futures", ".", "wait", "(", "not_done", ",", "timeout", "=", "_PROGRESS_INTERVAL", ")", "frames", ".", "extend", "(", "[", "future", ".", "result", "(", ")", "for", "future", "in", "done", "]", ")", "# The progress bar needs to update on the main thread to avoid", "# contention over stdout / stderr.", "self", ".", "_process_progress_updates", "(", "progress_queue", ",", "progress_bar", ")", 
"return", "frames", "with", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", ")", "as", "pool", ":", "try", ":", "frames", "=", "get_frames", "(", "pool", ")", "finally", ":", "# No need for a lock because reading/replacing a variable is", "# defined to be an atomic operation in the Python language", "# definition (enforced by the global interpreter lock).", "self", ".", "_to_dataframe_finished", "=", "True", "# Shutdown all background threads, now that they should know to", "# exit early.", "pool", ".", "shutdown", "(", "wait", "=", "True", ")", "if", "progress_thread", "is", "not", "None", ":", "progress_thread", ".", "join", "(", ")", "# Update the progress bar one last time to close it.", "self", ".", "_process_progress_updates", "(", "progress_queue", ",", "progress_bar", ")", "return", "pandas", ".", "concat", "(", "frames", ")" ]
Use (faster, but billable) BQ Storage API to construct DataFrame.
[ "Use", "(", "faster", "but", "billable", ")", "BQ", "Storage", "API", "to", "construct", "DataFrame", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1480-L1580
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
RowIterator._get_progress_bar
def _get_progress_bar(self, progress_bar_type): """Construct a tqdm progress bar object, if tqdm is installed.""" if tqdm is None: if progress_bar_type is not None: warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3) return None description = "Downloading" unit = "rows" try: if progress_bar_type == "tqdm": return tqdm.tqdm(desc=description, total=self.total_rows, unit=unit) elif progress_bar_type == "tqdm_notebook": return tqdm.tqdm_notebook( desc=description, total=self.total_rows, unit=unit ) elif progress_bar_type == "tqdm_gui": return tqdm.tqdm_gui(desc=description, total=self.total_rows, unit=unit) except (KeyError, TypeError): # Protect ourselves from any tqdm errors. In case of # unexpected tqdm behavior, just fall back to showing # no progress bar. warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3) return None
python
def _get_progress_bar(self, progress_bar_type): """Construct a tqdm progress bar object, if tqdm is installed.""" if tqdm is None: if progress_bar_type is not None: warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3) return None description = "Downloading" unit = "rows" try: if progress_bar_type == "tqdm": return tqdm.tqdm(desc=description, total=self.total_rows, unit=unit) elif progress_bar_type == "tqdm_notebook": return tqdm.tqdm_notebook( desc=description, total=self.total_rows, unit=unit ) elif progress_bar_type == "tqdm_gui": return tqdm.tqdm_gui(desc=description, total=self.total_rows, unit=unit) except (KeyError, TypeError): # Protect ourselves from any tqdm errors. In case of # unexpected tqdm behavior, just fall back to showing # no progress bar. warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3) return None
[ "def", "_get_progress_bar", "(", "self", ",", "progress_bar_type", ")", ":", "if", "tqdm", "is", "None", ":", "if", "progress_bar_type", "is", "not", "None", ":", "warnings", ".", "warn", "(", "_NO_TQDM_ERROR", ",", "UserWarning", ",", "stacklevel", "=", "3", ")", "return", "None", "description", "=", "\"Downloading\"", "unit", "=", "\"rows\"", "try", ":", "if", "progress_bar_type", "==", "\"tqdm\"", ":", "return", "tqdm", ".", "tqdm", "(", "desc", "=", "description", ",", "total", "=", "self", ".", "total_rows", ",", "unit", "=", "unit", ")", "elif", "progress_bar_type", "==", "\"tqdm_notebook\"", ":", "return", "tqdm", ".", "tqdm_notebook", "(", "desc", "=", "description", ",", "total", "=", "self", ".", "total_rows", ",", "unit", "=", "unit", ")", "elif", "progress_bar_type", "==", "\"tqdm_gui\"", ":", "return", "tqdm", ".", "tqdm_gui", "(", "desc", "=", "description", ",", "total", "=", "self", ".", "total_rows", ",", "unit", "=", "unit", ")", "except", "(", "KeyError", ",", "TypeError", ")", ":", "# Protect ourselves from any tqdm errors. In case of", "# unexpected tqdm behavior, just fall back to showing", "# no progress bar.", "warnings", ".", "warn", "(", "_NO_TQDM_ERROR", ",", "UserWarning", ",", "stacklevel", "=", "3", ")", "return", "None" ]
Construct a tqdm progress bar object, if tqdm is installed.
[ "Construct", "a", "tqdm", "progress", "bar", "object", "if", "tqdm", "is", "installed", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1582-L1606
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
RowIterator.to_dataframe
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None): """Create a pandas DataFrame by loading all pages of a query. Args: bqstorage_client ( \ google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient \ ): **Beta Feature** Optional. A BigQuery Storage API client. If supplied, use the faster BigQuery Storage API to fetch rows from BigQuery. This API is a billable API. This method requires the ``fastavro`` and ``google-cloud-bigquery-storage`` libraries. Reading from a specific partition or snapshot is not currently supported by this method. **Caution**: There is a known issue reading small anonymous query result tables with the BQ Storage API. When a problem is encountered reading a table, the tabledata.list method from the BigQuery API is used, instead. dtypes ( \ Map[str, Union[str, pandas.Series.dtype]] \ ): Optional. A dictionary of column names pandas ``dtype``s. The provided ``dtype`` is used when constructing the series for the column specified. Otherwise, the default pandas behavior is used. progress_bar_type (Optional[str]): If set, use the `tqdm <https://tqdm.github.io/>`_ library to display a progress bar while the data downloads. Install the ``tqdm`` package to use this feature. Possible values of ``progress_bar_type`` include: ``None`` No progress bar. ``'tqdm'`` Use the :func:`tqdm.tqdm` function to print a progress bar to :data:`sys.stderr`. ``'tqdm_notebook'`` Use the :func:`tqdm.tqdm_notebook` function to display a progress bar as a Jupyter notebook widget. ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. ..versionadded:: 1.11.0 Returns: pandas.DataFrame: A :class:`~pandas.DataFrame` populated with row data and column headers from the query results. The column headers are derived from the destination table's schema. 
Raises: ValueError: If the :mod:`pandas` library cannot be imported, or the :mod:`google.cloud.bigquery_storage_v1beta1` module is required but cannot be imported. """ if pandas is None: raise ValueError(_NO_PANDAS_ERROR) if dtypes is None: dtypes = {} progress_bar = self._get_progress_bar(progress_bar_type) if bqstorage_client is not None: try: return self._to_dataframe_bqstorage( bqstorage_client, dtypes, progress_bar=progress_bar ) except google.api_core.exceptions.Forbidden: # Don't hide errors such as insufficient permissions to create # a read session, or the API is not enabled. Both of those are # clearly problems if the developer has explicitly asked for # BigQuery Storage API support. raise except google.api_core.exceptions.GoogleAPICallError: # There is a known issue with reading from small anonymous # query results tables, so some errors are expected. Rather # than throw those errors, try reading the DataFrame again, but # with the tabledata.list API. pass return self._to_dataframe_tabledata_list(dtypes, progress_bar=progress_bar)
python
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None): """Create a pandas DataFrame by loading all pages of a query. Args: bqstorage_client ( \ google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient \ ): **Beta Feature** Optional. A BigQuery Storage API client. If supplied, use the faster BigQuery Storage API to fetch rows from BigQuery. This API is a billable API. This method requires the ``fastavro`` and ``google-cloud-bigquery-storage`` libraries. Reading from a specific partition or snapshot is not currently supported by this method. **Caution**: There is a known issue reading small anonymous query result tables with the BQ Storage API. When a problem is encountered reading a table, the tabledata.list method from the BigQuery API is used, instead. dtypes ( \ Map[str, Union[str, pandas.Series.dtype]] \ ): Optional. A dictionary of column names pandas ``dtype``s. The provided ``dtype`` is used when constructing the series for the column specified. Otherwise, the default pandas behavior is used. progress_bar_type (Optional[str]): If set, use the `tqdm <https://tqdm.github.io/>`_ library to display a progress bar while the data downloads. Install the ``tqdm`` package to use this feature. Possible values of ``progress_bar_type`` include: ``None`` No progress bar. ``'tqdm'`` Use the :func:`tqdm.tqdm` function to print a progress bar to :data:`sys.stderr`. ``'tqdm_notebook'`` Use the :func:`tqdm.tqdm_notebook` function to display a progress bar as a Jupyter notebook widget. ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. ..versionadded:: 1.11.0 Returns: pandas.DataFrame: A :class:`~pandas.DataFrame` populated with row data and column headers from the query results. The column headers are derived from the destination table's schema. 
Raises: ValueError: If the :mod:`pandas` library cannot be imported, or the :mod:`google.cloud.bigquery_storage_v1beta1` module is required but cannot be imported. """ if pandas is None: raise ValueError(_NO_PANDAS_ERROR) if dtypes is None: dtypes = {} progress_bar = self._get_progress_bar(progress_bar_type) if bqstorage_client is not None: try: return self._to_dataframe_bqstorage( bqstorage_client, dtypes, progress_bar=progress_bar ) except google.api_core.exceptions.Forbidden: # Don't hide errors such as insufficient permissions to create # a read session, or the API is not enabled. Both of those are # clearly problems if the developer has explicitly asked for # BigQuery Storage API support. raise except google.api_core.exceptions.GoogleAPICallError: # There is a known issue with reading from small anonymous # query results tables, so some errors are expected. Rather # than throw those errors, try reading the DataFrame again, but # with the tabledata.list API. pass return self._to_dataframe_tabledata_list(dtypes, progress_bar=progress_bar)
[ "def", "to_dataframe", "(", "self", ",", "bqstorage_client", "=", "None", ",", "dtypes", "=", "None", ",", "progress_bar_type", "=", "None", ")", ":", "if", "pandas", "is", "None", ":", "raise", "ValueError", "(", "_NO_PANDAS_ERROR", ")", "if", "dtypes", "is", "None", ":", "dtypes", "=", "{", "}", "progress_bar", "=", "self", ".", "_get_progress_bar", "(", "progress_bar_type", ")", "if", "bqstorage_client", "is", "not", "None", ":", "try", ":", "return", "self", ".", "_to_dataframe_bqstorage", "(", "bqstorage_client", ",", "dtypes", ",", "progress_bar", "=", "progress_bar", ")", "except", "google", ".", "api_core", ".", "exceptions", ".", "Forbidden", ":", "# Don't hide errors such as insufficient permissions to create", "# a read session, or the API is not enabled. Both of those are", "# clearly problems if the developer has explicitly asked for", "# BigQuery Storage API support.", "raise", "except", "google", ".", "api_core", ".", "exceptions", ".", "GoogleAPICallError", ":", "# There is a known issue with reading from small anonymous", "# query results tables, so some errors are expected. Rather", "# than throw those errors, try reading the DataFrame again, but", "# with the tabledata.list API.", "pass", "return", "self", ".", "_to_dataframe_tabledata_list", "(", "dtypes", ",", "progress_bar", "=", "progress_bar", ")" ]
Create a pandas DataFrame by loading all pages of a query. Args: bqstorage_client ( \ google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient \ ): **Beta Feature** Optional. A BigQuery Storage API client. If supplied, use the faster BigQuery Storage API to fetch rows from BigQuery. This API is a billable API. This method requires the ``fastavro`` and ``google-cloud-bigquery-storage`` libraries. Reading from a specific partition or snapshot is not currently supported by this method. **Caution**: There is a known issue reading small anonymous query result tables with the BQ Storage API. When a problem is encountered reading a table, the tabledata.list method from the BigQuery API is used, instead. dtypes ( \ Map[str, Union[str, pandas.Series.dtype]] \ ): Optional. A dictionary of column names pandas ``dtype``s. The provided ``dtype`` is used when constructing the series for the column specified. Otherwise, the default pandas behavior is used. progress_bar_type (Optional[str]): If set, use the `tqdm <https://tqdm.github.io/>`_ library to display a progress bar while the data downloads. Install the ``tqdm`` package to use this feature. Possible values of ``progress_bar_type`` include: ``None`` No progress bar. ``'tqdm'`` Use the :func:`tqdm.tqdm` function to print a progress bar to :data:`sys.stderr`. ``'tqdm_notebook'`` Use the :func:`tqdm.tqdm_notebook` function to display a progress bar as a Jupyter notebook widget. ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. ..versionadded:: 1.11.0 Returns: pandas.DataFrame: A :class:`~pandas.DataFrame` populated with row data and column headers from the query results. The column headers are derived from the destination table's schema. Raises: ValueError: If the :mod:`pandas` library cannot be imported, or the :mod:`google.cloud.bigquery_storage_v1beta1` module is required but cannot be imported.
[ "Create", "a", "pandas", "DataFrame", "by", "loading", "all", "pages", "of", "a", "query", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1608-L1695
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
_EmptyRowIterator.to_dataframe
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None): """Create an empty dataframe. Args: bqstorage_client (Any): Ignored. Added for compatibility with RowIterator. dtypes (Any): Ignored. Added for compatibility with RowIterator. progress_bar_type (Any): Ignored. Added for compatibility with RowIterator. Returns: pandas.DataFrame: An empty :class:`~pandas.DataFrame`. """ if pandas is None: raise ValueError(_NO_PANDAS_ERROR) return pandas.DataFrame()
python
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None): """Create an empty dataframe. Args: bqstorage_client (Any): Ignored. Added for compatibility with RowIterator. dtypes (Any): Ignored. Added for compatibility with RowIterator. progress_bar_type (Any): Ignored. Added for compatibility with RowIterator. Returns: pandas.DataFrame: An empty :class:`~pandas.DataFrame`. """ if pandas is None: raise ValueError(_NO_PANDAS_ERROR) return pandas.DataFrame()
[ "def", "to_dataframe", "(", "self", ",", "bqstorage_client", "=", "None", ",", "dtypes", "=", "None", ",", "progress_bar_type", "=", "None", ")", ":", "if", "pandas", "is", "None", ":", "raise", "ValueError", "(", "_NO_PANDAS_ERROR", ")", "return", "pandas", ".", "DataFrame", "(", ")" ]
Create an empty dataframe. Args: bqstorage_client (Any): Ignored. Added for compatibility with RowIterator. dtypes (Any): Ignored. Added for compatibility with RowIterator. progress_bar_type (Any): Ignored. Added for compatibility with RowIterator. Returns: pandas.DataFrame: An empty :class:`~pandas.DataFrame`.
[ "Create", "an", "empty", "dataframe", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1710-L1727
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
TimePartitioning.from_api_repr
def from_api_repr(cls, api_repr): """Return a :class:`TimePartitioning` object deserialized from a dict. This method creates a new ``TimePartitioning`` instance that points to the ``api_repr`` parameter as its internal properties dict. This means that when a ``TimePartitioning`` instance is stored as a property of another object, any changes made at the higher level will also appear here:: >>> time_partitioning = TimePartitioning() >>> table.time_partitioning = time_partitioning >>> table.time_partitioning.field = 'timecolumn' >>> time_partitioning.field 'timecolumn' Args: api_repr (Mapping[str, str]): The serialized representation of the TimePartitioning, such as what is output by :meth:`to_api_repr`. Returns: google.cloud.bigquery.table.TimePartitioning: The ``TimePartitioning`` object. """ instance = cls(api_repr["type"]) instance._properties = api_repr return instance
python
def from_api_repr(cls, api_repr): """Return a :class:`TimePartitioning` object deserialized from a dict. This method creates a new ``TimePartitioning`` instance that points to the ``api_repr`` parameter as its internal properties dict. This means that when a ``TimePartitioning`` instance is stored as a property of another object, any changes made at the higher level will also appear here:: >>> time_partitioning = TimePartitioning() >>> table.time_partitioning = time_partitioning >>> table.time_partitioning.field = 'timecolumn' >>> time_partitioning.field 'timecolumn' Args: api_repr (Mapping[str, str]): The serialized representation of the TimePartitioning, such as what is output by :meth:`to_api_repr`. Returns: google.cloud.bigquery.table.TimePartitioning: The ``TimePartitioning`` object. """ instance = cls(api_repr["type"]) instance._properties = api_repr return instance
[ "def", "from_api_repr", "(", "cls", ",", "api_repr", ")", ":", "instance", "=", "cls", "(", "api_repr", "[", "\"type\"", "]", ")", "instance", ".", "_properties", "=", "api_repr", "return", "instance" ]
Return a :class:`TimePartitioning` object deserialized from a dict. This method creates a new ``TimePartitioning`` instance that points to the ``api_repr`` parameter as its internal properties dict. This means that when a ``TimePartitioning`` instance is stored as a property of another object, any changes made at the higher level will also appear here:: >>> time_partitioning = TimePartitioning() >>> table.time_partitioning = time_partitioning >>> table.time_partitioning.field = 'timecolumn' >>> time_partitioning.field 'timecolumn' Args: api_repr (Mapping[str, str]): The serialized representation of the TimePartitioning, such as what is output by :meth:`to_api_repr`. Returns: google.cloud.bigquery.table.TimePartitioning: The ``TimePartitioning`` object.
[ "Return", "a", ":", "class", ":", "TimePartitioning", "object", "deserialized", "from", "a", "dict", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L1820-L1846
train
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
_generate_faux_mime_message
def _generate_faux_mime_message(parser, response): """Convert response, content -> (multipart) email.message. Helper for _unpack_batch_response. """ # We coerce to bytes to get consistent concat across # Py2 and Py3. Percent formatting is insufficient since # it includes the b in Py3. content_type = _helpers._to_bytes(response.headers.get("content-type", "")) faux_message = b"".join( [b"Content-Type: ", content_type, b"\nMIME-Version: 1.0\n\n", response.content] ) if six.PY2: return parser.parsestr(faux_message) else: # pragma: NO COVER Python3 return parser.parsestr(faux_message.decode("utf-8"))
python
def _generate_faux_mime_message(parser, response): """Convert response, content -> (multipart) email.message. Helper for _unpack_batch_response. """ # We coerce to bytes to get consistent concat across # Py2 and Py3. Percent formatting is insufficient since # it includes the b in Py3. content_type = _helpers._to_bytes(response.headers.get("content-type", "")) faux_message = b"".join( [b"Content-Type: ", content_type, b"\nMIME-Version: 1.0\n\n", response.content] ) if six.PY2: return parser.parsestr(faux_message) else: # pragma: NO COVER Python3 return parser.parsestr(faux_message.decode("utf-8"))
[ "def", "_generate_faux_mime_message", "(", "parser", ",", "response", ")", ":", "# We coerce to bytes to get consistent concat across", "# Py2 and Py3. Percent formatting is insufficient since", "# it includes the b in Py3.", "content_type", "=", "_helpers", ".", "_to_bytes", "(", "response", ".", "headers", ".", "get", "(", "\"content-type\"", ",", "\"\"", ")", ")", "faux_message", "=", "b\"\"", ".", "join", "(", "[", "b\"Content-Type: \"", ",", "content_type", ",", "b\"\\nMIME-Version: 1.0\\n\\n\"", ",", "response", ".", "content", "]", ")", "if", "six", ".", "PY2", ":", "return", "parser", ".", "parsestr", "(", "faux_message", ")", "else", ":", "# pragma: NO COVER Python3", "return", "parser", ".", "parsestr", "(", "faux_message", ".", "decode", "(", "\"utf-8\"", ")", ")" ]
Convert response, content -> (multipart) email.message. Helper for _unpack_batch_response.
[ "Convert", "response", "content", "-", ">", "(", "multipart", ")", "email", ".", "message", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L284-L301
train
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
_unpack_batch_response
def _unpack_batch_response(response): """Convert requests.Response -> [(headers, payload)]. Creates a generator of tuples of emulating the responses to :meth:`requests.Session.request`. :type response: :class:`requests.Response` :param response: HTTP response / headers from a request. """ parser = Parser() message = _generate_faux_mime_message(parser, response) if not isinstance(message._payload, list): raise ValueError("Bad response: not multi-part") for subrequest in message._payload: status_line, rest = subrequest._payload.split("\n", 1) _, status, _ = status_line.split(" ", 2) sub_message = parser.parsestr(rest) payload = sub_message._payload msg_headers = dict(sub_message._headers) content_id = msg_headers.get("Content-ID") subresponse = requests.Response() subresponse.request = requests.Request( method="BATCH", url="contentid://{}".format(content_id) ).prepare() subresponse.status_code = int(status) subresponse.headers.update(msg_headers) subresponse._content = payload.encode("utf-8") yield subresponse
python
def _unpack_batch_response(response): """Convert requests.Response -> [(headers, payload)]. Creates a generator of tuples of emulating the responses to :meth:`requests.Session.request`. :type response: :class:`requests.Response` :param response: HTTP response / headers from a request. """ parser = Parser() message = _generate_faux_mime_message(parser, response) if not isinstance(message._payload, list): raise ValueError("Bad response: not multi-part") for subrequest in message._payload: status_line, rest = subrequest._payload.split("\n", 1) _, status, _ = status_line.split(" ", 2) sub_message = parser.parsestr(rest) payload = sub_message._payload msg_headers = dict(sub_message._headers) content_id = msg_headers.get("Content-ID") subresponse = requests.Response() subresponse.request = requests.Request( method="BATCH", url="contentid://{}".format(content_id) ).prepare() subresponse.status_code = int(status) subresponse.headers.update(msg_headers) subresponse._content = payload.encode("utf-8") yield subresponse
[ "def", "_unpack_batch_response", "(", "response", ")", ":", "parser", "=", "Parser", "(", ")", "message", "=", "_generate_faux_mime_message", "(", "parser", ",", "response", ")", "if", "not", "isinstance", "(", "message", ".", "_payload", ",", "list", ")", ":", "raise", "ValueError", "(", "\"Bad response: not multi-part\"", ")", "for", "subrequest", "in", "message", ".", "_payload", ":", "status_line", ",", "rest", "=", "subrequest", ".", "_payload", ".", "split", "(", "\"\\n\"", ",", "1", ")", "_", ",", "status", ",", "_", "=", "status_line", ".", "split", "(", "\" \"", ",", "2", ")", "sub_message", "=", "parser", ".", "parsestr", "(", "rest", ")", "payload", "=", "sub_message", ".", "_payload", "msg_headers", "=", "dict", "(", "sub_message", ".", "_headers", ")", "content_id", "=", "msg_headers", ".", "get", "(", "\"Content-ID\"", ")", "subresponse", "=", "requests", ".", "Response", "(", ")", "subresponse", ".", "request", "=", "requests", ".", "Request", "(", "method", "=", "\"BATCH\"", ",", "url", "=", "\"contentid://{}\"", ".", "format", "(", "content_id", ")", ")", ".", "prepare", "(", ")", "subresponse", ".", "status_code", "=", "int", "(", "status", ")", "subresponse", ".", "headers", ".", "update", "(", "msg_headers", ")", "subresponse", ".", "_content", "=", "payload", ".", "encode", "(", "\"utf-8\"", ")", "yield", "subresponse" ]
Convert requests.Response -> [(headers, payload)]. Creates a generator of tuples of emulating the responses to :meth:`requests.Session.request`. :type response: :class:`requests.Response` :param response: HTTP response / headers from a request.
[ "Convert", "requests", ".", "Response", "-", ">", "[", "(", "headers", "payload", ")", "]", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L304-L335
train
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
Batch._do_request
def _do_request(self, method, url, headers, data, target_object): """Override Connection: defer actual HTTP request. Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. :type method: str :param method: The HTTP method to use in the request. :type url: str :param url: The URL to send the request to. :type headers: dict :param headers: A dictionary of HTTP headers to send with the request. :type data: str :param data: The data to send as the body of the request. :type target_object: object :param target_object: (Optional) This allows us to enable custom behavior in our batch connection. Here we defer an HTTP request and complete initialization of the object at a later time. :rtype: tuple of ``response`` (a dictionary of sorts) and ``content`` (a string). :returns: The HTTP response object and the content of the response. """ if len(self._requests) >= self._MAX_BATCH_SIZE: raise ValueError( "Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE ) self._requests.append((method, url, headers, data)) result = _FutureDict() self._target_objects.append(target_object) if target_object is not None: target_object._properties = result return _FutureResponse(result)
python
def _do_request(self, method, url, headers, data, target_object): """Override Connection: defer actual HTTP request. Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. :type method: str :param method: The HTTP method to use in the request. :type url: str :param url: The URL to send the request to. :type headers: dict :param headers: A dictionary of HTTP headers to send with the request. :type data: str :param data: The data to send as the body of the request. :type target_object: object :param target_object: (Optional) This allows us to enable custom behavior in our batch connection. Here we defer an HTTP request and complete initialization of the object at a later time. :rtype: tuple of ``response`` (a dictionary of sorts) and ``content`` (a string). :returns: The HTTP response object and the content of the response. """ if len(self._requests) >= self._MAX_BATCH_SIZE: raise ValueError( "Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE ) self._requests.append((method, url, headers, data)) result = _FutureDict() self._target_objects.append(target_object) if target_object is not None: target_object._properties = result return _FutureResponse(result)
[ "def", "_do_request", "(", "self", ",", "method", ",", "url", ",", "headers", ",", "data", ",", "target_object", ")", ":", "if", "len", "(", "self", ".", "_requests", ")", ">=", "self", ".", "_MAX_BATCH_SIZE", ":", "raise", "ValueError", "(", "\"Too many deferred requests (max %d)\"", "%", "self", ".", "_MAX_BATCH_SIZE", ")", "self", ".", "_requests", ".", "append", "(", "(", "method", ",", "url", ",", "headers", ",", "data", ")", ")", "result", "=", "_FutureDict", "(", ")", "self", ".", "_target_objects", ".", "append", "(", "target_object", ")", "if", "target_object", "is", "not", "None", ":", "target_object", ".", "_properties", "=", "result", "return", "_FutureResponse", "(", "result", ")" ]
Override Connection: defer actual HTTP request. Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. :type method: str :param method: The HTTP method to use in the request. :type url: str :param url: The URL to send the request to. :type headers: dict :param headers: A dictionary of HTTP headers to send with the request. :type data: str :param data: The data to send as the body of the request. :type target_object: object :param target_object: (Optional) This allows us to enable custom behavior in our batch connection. Here we defer an HTTP request and complete initialization of the object at a later time. :rtype: tuple of ``response`` (a dictionary of sorts) and ``content`` (a string). :returns: The HTTP response object and the content of the response.
[ "Override", "Connection", ":", "defer", "actual", "HTTP", "request", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L153-L189
train
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
Batch._prepare_batch_request
def _prepare_batch_request(self): """Prepares headers and body for a batch request. :rtype: tuple (dict, str) :returns: The pair of headers and body of the batch request to be sent. :raises: :class:`ValueError` if no requests have been deferred. """ if len(self._requests) == 0: raise ValueError("No deferred requests") multi = MIMEMultipart() for method, uri, headers, body in self._requests: subrequest = MIMEApplicationHTTP(method, uri, headers, body) multi.attach(subrequest) # The `email` package expects to deal with "native" strings if six.PY3: # pragma: NO COVER Python3 buf = io.StringIO() else: buf = io.BytesIO() generator = Generator(buf, False, 0) generator.flatten(multi) payload = buf.getvalue() # Strip off redundant header text _, body = payload.split("\n\n", 1) return dict(multi._headers), body
python
def _prepare_batch_request(self): """Prepares headers and body for a batch request. :rtype: tuple (dict, str) :returns: The pair of headers and body of the batch request to be sent. :raises: :class:`ValueError` if no requests have been deferred. """ if len(self._requests) == 0: raise ValueError("No deferred requests") multi = MIMEMultipart() for method, uri, headers, body in self._requests: subrequest = MIMEApplicationHTTP(method, uri, headers, body) multi.attach(subrequest) # The `email` package expects to deal with "native" strings if six.PY3: # pragma: NO COVER Python3 buf = io.StringIO() else: buf = io.BytesIO() generator = Generator(buf, False, 0) generator.flatten(multi) payload = buf.getvalue() # Strip off redundant header text _, body = payload.split("\n\n", 1) return dict(multi._headers), body
[ "def", "_prepare_batch_request", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_requests", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No deferred requests\"", ")", "multi", "=", "MIMEMultipart", "(", ")", "for", "method", ",", "uri", ",", "headers", ",", "body", "in", "self", ".", "_requests", ":", "subrequest", "=", "MIMEApplicationHTTP", "(", "method", ",", "uri", ",", "headers", ",", "body", ")", "multi", ".", "attach", "(", "subrequest", ")", "# The `email` package expects to deal with \"native\" strings", "if", "six", ".", "PY3", ":", "# pragma: NO COVER Python3", "buf", "=", "io", ".", "StringIO", "(", ")", "else", ":", "buf", "=", "io", ".", "BytesIO", "(", ")", "generator", "=", "Generator", "(", "buf", ",", "False", ",", "0", ")", "generator", ".", "flatten", "(", "multi", ")", "payload", "=", "buf", ".", "getvalue", "(", ")", "# Strip off redundant header text", "_", ",", "body", "=", "payload", ".", "split", "(", "\"\\n\\n\"", ",", "1", ")", "return", "dict", "(", "multi", ".", "_headers", ")", ",", "body" ]
Prepares headers and body for a batch request. :rtype: tuple (dict, str) :returns: The pair of headers and body of the batch request to be sent. :raises: :class:`ValueError` if no requests have been deferred.
[ "Prepares", "headers", "and", "body", "for", "a", "batch", "request", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L191-L218
train
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
Batch._finish_futures
def _finish_futures(self, responses): """Apply all the batch responses to the futures created. :type responses: list of (headers, payload) tuples. :param responses: List of headers and payloads from each response in the batch. :raises: :class:`ValueError` if no requests have been deferred. """ # If a bad status occurs, we track it, but don't raise an exception # until all futures have been populated. exception_args = None if len(self._target_objects) != len(responses): raise ValueError("Expected a response for every request.") for target_object, subresponse in zip(self._target_objects, responses): if not 200 <= subresponse.status_code < 300: exception_args = exception_args or subresponse elif target_object is not None: try: target_object._properties = subresponse.json() except ValueError: target_object._properties = subresponse.content if exception_args is not None: raise exceptions.from_http_response(exception_args)
python
def _finish_futures(self, responses): """Apply all the batch responses to the futures created. :type responses: list of (headers, payload) tuples. :param responses: List of headers and payloads from each response in the batch. :raises: :class:`ValueError` if no requests have been deferred. """ # If a bad status occurs, we track it, but don't raise an exception # until all futures have been populated. exception_args = None if len(self._target_objects) != len(responses): raise ValueError("Expected a response for every request.") for target_object, subresponse in zip(self._target_objects, responses): if not 200 <= subresponse.status_code < 300: exception_args = exception_args or subresponse elif target_object is not None: try: target_object._properties = subresponse.json() except ValueError: target_object._properties = subresponse.content if exception_args is not None: raise exceptions.from_http_response(exception_args)
[ "def", "_finish_futures", "(", "self", ",", "responses", ")", ":", "# If a bad status occurs, we track it, but don't raise an exception", "# until all futures have been populated.", "exception_args", "=", "None", "if", "len", "(", "self", ".", "_target_objects", ")", "!=", "len", "(", "responses", ")", ":", "raise", "ValueError", "(", "\"Expected a response for every request.\"", ")", "for", "target_object", ",", "subresponse", "in", "zip", "(", "self", ".", "_target_objects", ",", "responses", ")", ":", "if", "not", "200", "<=", "subresponse", ".", "status_code", "<", "300", ":", "exception_args", "=", "exception_args", "or", "subresponse", "elif", "target_object", "is", "not", "None", ":", "try", ":", "target_object", ".", "_properties", "=", "subresponse", ".", "json", "(", ")", "except", "ValueError", ":", "target_object", ".", "_properties", "=", "subresponse", ".", "content", "if", "exception_args", "is", "not", "None", ":", "raise", "exceptions", ".", "from_http_response", "(", "exception_args", ")" ]
Apply all the batch responses to the futures created. :type responses: list of (headers, payload) tuples. :param responses: List of headers and payloads from each response in the batch. :raises: :class:`ValueError` if no requests have been deferred.
[ "Apply", "all", "the", "batch", "responses", "to", "the", "futures", "created", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L220-L246
train
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
Batch.finish
def finish(self): """Submit a single `multipart/mixed` request with deferred requests. :rtype: list of tuples :returns: one ``(headers, payload)`` tuple per deferred request. """ headers, body = self._prepare_batch_request() url = "%s/batch/storage/v1" % self.API_BASE_URL # Use the private ``_base_connection`` rather than the property # ``_connection``, since the property may be this # current batch. response = self._client._base_connection._make_request( "POST", url, data=body, headers=headers ) responses = list(_unpack_batch_response(response)) self._finish_futures(responses) return responses
python
def finish(self): """Submit a single `multipart/mixed` request with deferred requests. :rtype: list of tuples :returns: one ``(headers, payload)`` tuple per deferred request. """ headers, body = self._prepare_batch_request() url = "%s/batch/storage/v1" % self.API_BASE_URL # Use the private ``_base_connection`` rather than the property # ``_connection``, since the property may be this # current batch. response = self._client._base_connection._make_request( "POST", url, data=body, headers=headers ) responses = list(_unpack_batch_response(response)) self._finish_futures(responses) return responses
[ "def", "finish", "(", "self", ")", ":", "headers", ",", "body", "=", "self", ".", "_prepare_batch_request", "(", ")", "url", "=", "\"%s/batch/storage/v1\"", "%", "self", ".", "API_BASE_URL", "# Use the private ``_base_connection`` rather than the property", "# ``_connection``, since the property may be this", "# current batch.", "response", "=", "self", ".", "_client", ".", "_base_connection", ".", "_make_request", "(", "\"POST\"", ",", "url", ",", "data", "=", "body", ",", "headers", "=", "headers", ")", "responses", "=", "list", "(", "_unpack_batch_response", "(", "response", ")", ")", "self", ".", "_finish_futures", "(", "responses", ")", "return", "responses" ]
Submit a single `multipart/mixed` request with deferred requests. :rtype: list of tuples :returns: one ``(headers, payload)`` tuple per deferred request.
[ "Submit", "a", "single", "multipart", "/", "mixed", "request", "with", "deferred", "requests", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L248-L266
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
_restart_on_unavailable
def _restart_on_unavailable(restart): """Restart iteration after :exc:`.ServiceUnavailable`. :type restart: callable :param restart: curried function returning iterator """ resume_token = b"" item_buffer = [] iterator = restart() while True: try: for item in iterator: item_buffer.append(item) if item.resume_token: resume_token = item.resume_token break except ServiceUnavailable: del item_buffer[:] iterator = restart(resume_token=resume_token) continue if len(item_buffer) == 0: break for item in item_buffer: yield item del item_buffer[:]
python
def _restart_on_unavailable(restart): """Restart iteration after :exc:`.ServiceUnavailable`. :type restart: callable :param restart: curried function returning iterator """ resume_token = b"" item_buffer = [] iterator = restart() while True: try: for item in iterator: item_buffer.append(item) if item.resume_token: resume_token = item.resume_token break except ServiceUnavailable: del item_buffer[:] iterator = restart(resume_token=resume_token) continue if len(item_buffer) == 0: break for item in item_buffer: yield item del item_buffer[:]
[ "def", "_restart_on_unavailable", "(", "restart", ")", ":", "resume_token", "=", "b\"\"", "item_buffer", "=", "[", "]", "iterator", "=", "restart", "(", ")", "while", "True", ":", "try", ":", "for", "item", "in", "iterator", ":", "item_buffer", ".", "append", "(", "item", ")", "if", "item", ".", "resume_token", ":", "resume_token", "=", "item", ".", "resume_token", "break", "except", "ServiceUnavailable", ":", "del", "item_buffer", "[", ":", "]", "iterator", "=", "restart", "(", "resume_token", "=", "resume_token", ")", "continue", "if", "len", "(", "item_buffer", ")", "==", "0", ":", "break", "for", "item", "in", "item_buffer", ":", "yield", "item", "del", "item_buffer", "[", ":", "]" ]
Restart iteration after :exc:`.ServiceUnavailable`. :type restart: callable :param restart: curried function returning iterator
[ "Restart", "iteration", "after", ":", "exc", ":", ".", "ServiceUnavailable", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L34-L61
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
_SnapshotBase.read
def read(self, table, columns, keyset, index="", limit=0, partition=None): """Perform a ``StreamingRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type limit: int :param limit: (Optional) maximum number of rows to return. Incompatible with ``partition``. :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_read`. Incompatible with ``limit``. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. """ if self._read_request_count > 0: if not self._multi_use: raise ValueError("Cannot re-use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction ID pending.") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() restart = functools.partial( api.streaming_read, self._session.name, table, columns, keyset._to_pb(), transaction=transaction, index=index, limit=limit, partition_token=partition, metadata=metadata, ) iterator = _restart_on_unavailable(restart) self._read_request_count += 1 if self._multi_use: return StreamedResultSet(iterator, source=self) else: return StreamedResultSet(iterator)
python
def read(self, table, columns, keyset, index="", limit=0, partition=None): """Perform a ``StreamingRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type limit: int :param limit: (Optional) maximum number of rows to return. Incompatible with ``partition``. :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_read`. Incompatible with ``limit``. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. """ if self._read_request_count > 0: if not self._multi_use: raise ValueError("Cannot re-use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction ID pending.") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() restart = functools.partial( api.streaming_read, self._session.name, table, columns, keyset._to_pb(), transaction=transaction, index=index, limit=limit, partition_token=partition, metadata=metadata, ) iterator = _restart_on_unavailable(restart) self._read_request_count += 1 if self._multi_use: return StreamedResultSet(iterator, source=self) else: return StreamedResultSet(iterator)
[ "def", "read", "(", "self", ",", "table", ",", "columns", ",", "keyset", ",", "index", "=", "\"\"", ",", "limit", "=", "0", ",", "partition", "=", "None", ")", ":", "if", "self", ".", "_read_request_count", ">", "0", ":", "if", "not", "self", ".", "_multi_use", ":", "raise", "ValueError", "(", "\"Cannot re-use single-use snapshot.\"", ")", "if", "self", ".", "_transaction_id", "is", "None", ":", "raise", "ValueError", "(", "\"Transaction ID pending.\"", ")", "database", "=", "self", ".", "_session", ".", "_database", "api", "=", "database", ".", "spanner_api", "metadata", "=", "_metadata_with_prefix", "(", "database", ".", "name", ")", "transaction", "=", "self", ".", "_make_txn_selector", "(", ")", "restart", "=", "functools", ".", "partial", "(", "api", ".", "streaming_read", ",", "self", ".", "_session", ".", "name", ",", "table", ",", "columns", ",", "keyset", ".", "_to_pb", "(", ")", ",", "transaction", "=", "transaction", ",", "index", "=", "index", ",", "limit", "=", "limit", ",", "partition_token", "=", "partition", ",", "metadata", "=", "metadata", ",", ")", "iterator", "=", "_restart_on_unavailable", "(", "restart", ")", "self", ".", "_read_request_count", "+=", "1", "if", "self", ".", "_multi_use", ":", "return", "StreamedResultSet", "(", "iterator", ",", "source", "=", "self", ")", "else", ":", "return", "StreamedResultSet", "(", "iterator", ")" ]
Perform a ``StreamingRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type limit: int :param limit: (Optional) maximum number of rows to return. Incompatible with ``partition``. :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_read`. Incompatible with ``limit``. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots.
[ "Perform", "a", "StreamingRead", "API", "request", "for", "rows", "in", "a", "table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L89-L152
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
_SnapshotBase.execute_sql
def execute_sql( self, sql, params=None, param_types=None, query_mode=None, partition=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, ): """Perform an ``ExecuteStreamingSql`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type query_mode: :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. 
""" if self._read_request_count > 0: if not self._multi_use: raise ValueError("Cannot re-use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction ID pending.") if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") params_pb = Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: params_pb = None database = self._session._database metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() api = database.spanner_api restart = functools.partial( api.execute_streaming_sql, self._session.name, sql, transaction=transaction, params=params_pb, param_types=param_types, query_mode=query_mode, partition_token=partition, seqno=self._execute_sql_count, metadata=metadata, retry=retry, timeout=timeout, ) iterator = _restart_on_unavailable(restart) self._read_request_count += 1 self._execute_sql_count += 1 if self._multi_use: return StreamedResultSet(iterator, source=self) else: return StreamedResultSet(iterator)
python
def execute_sql( self, sql, params=None, param_types=None, query_mode=None, partition=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, ): """Perform an ``ExecuteStreamingSql`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type query_mode: :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. 
""" if self._read_request_count > 0: if not self._multi_use: raise ValueError("Cannot re-use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction ID pending.") if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") params_pb = Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: params_pb = None database = self._session._database metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() api = database.spanner_api restart = functools.partial( api.execute_streaming_sql, self._session.name, sql, transaction=transaction, params=params_pb, param_types=param_types, query_mode=query_mode, partition_token=partition, seqno=self._execute_sql_count, metadata=metadata, retry=retry, timeout=timeout, ) iterator = _restart_on_unavailable(restart) self._read_request_count += 1 self._execute_sql_count += 1 if self._multi_use: return StreamedResultSet(iterator, source=self) else: return StreamedResultSet(iterator)
[ "def", "execute_sql", "(", "self", ",", "sql", ",", "params", "=", "None", ",", "param_types", "=", "None", ",", "query_mode", "=", "None", ",", "partition", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", ")", ":", "if", "self", ".", "_read_request_count", ">", "0", ":", "if", "not", "self", ".", "_multi_use", ":", "raise", "ValueError", "(", "\"Cannot re-use single-use snapshot.\"", ")", "if", "self", ".", "_transaction_id", "is", "None", ":", "raise", "ValueError", "(", "\"Transaction ID pending.\"", ")", "if", "params", "is", "not", "None", ":", "if", "param_types", "is", "None", ":", "raise", "ValueError", "(", "\"Specify 'param_types' when passing 'params'.\"", ")", "params_pb", "=", "Struct", "(", "fields", "=", "{", "key", ":", "_make_value_pb", "(", "value", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", "}", ")", "else", ":", "params_pb", "=", "None", "database", "=", "self", ".", "_session", ".", "_database", "metadata", "=", "_metadata_with_prefix", "(", "database", ".", "name", ")", "transaction", "=", "self", ".", "_make_txn_selector", "(", ")", "api", "=", "database", ".", "spanner_api", "restart", "=", "functools", ".", "partial", "(", "api", ".", "execute_streaming_sql", ",", "self", ".", "_session", ".", "name", ",", "sql", ",", "transaction", "=", "transaction", ",", "params", "=", "params_pb", ",", "param_types", "=", "param_types", ",", "query_mode", "=", "query_mode", ",", "partition_token", "=", "partition", ",", "seqno", "=", "self", ".", "_execute_sql_count", ",", "metadata", "=", "metadata", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", ")", "iterator", "=", "_restart_on_unavailable", "(", "restart", ")", "self", ".", "_read_request_count", "+=", "1", "self", ".", "_execute_sql_count", "+=", "1", "if", "self", ".", "_multi_use", ":", 
"return", "StreamedResultSet", "(", "iterator", ",", "source", "=", "self", ")", "else", ":", "return", "StreamedResultSet", "(", "iterator", ")" ]
Perform an ``ExecuteStreamingSql`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type query_mode: :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots.
[ "Perform", "an", "ExecuteStreamingSql", "API", "request", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L154-L237
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
_SnapshotBase.partition_read
def partition_read( self, table, columns, keyset, index="", partition_size_bytes=None, max_partitions=None, ): """Perform a ``ParitionRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type partition_size_bytes: int :param partition_size_bytes: (Optional) desired size for each partition generated. The service uses this as a hint, the actual partition size may differ. :type max_partitions: int :param max_partitions: (Optional) desired maximum number of partitions generated. The service uses this as a hint, the actual number of partitions may differ. :rtype: iterable of bytes :returns: a sequence of partition tokens :raises ValueError: for single-use snapshots, or if a transaction ID is already associtated with the snapshot. """ if not self._multi_use: raise ValueError("Cannot use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction not started.") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() partition_options = PartitionOptions( partition_size_bytes=partition_size_bytes, max_partitions=max_partitions ) response = api.partition_read( session=self._session.name, table=table, columns=columns, key_set=keyset._to_pb(), transaction=transaction, index=index, partition_options=partition_options, metadata=metadata, ) return [partition.partition_token for partition in response.partitions]
python
def partition_read( self, table, columns, keyset, index="", partition_size_bytes=None, max_partitions=None, ): """Perform a ``ParitionRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type partition_size_bytes: int :param partition_size_bytes: (Optional) desired size for each partition generated. The service uses this as a hint, the actual partition size may differ. :type max_partitions: int :param max_partitions: (Optional) desired maximum number of partitions generated. The service uses this as a hint, the actual number of partitions may differ. :rtype: iterable of bytes :returns: a sequence of partition tokens :raises ValueError: for single-use snapshots, or if a transaction ID is already associtated with the snapshot. """ if not self._multi_use: raise ValueError("Cannot use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction not started.") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() partition_options = PartitionOptions( partition_size_bytes=partition_size_bytes, max_partitions=max_partitions ) response = api.partition_read( session=self._session.name, table=table, columns=columns, key_set=keyset._to_pb(), transaction=transaction, index=index, partition_options=partition_options, metadata=metadata, ) return [partition.partition_token for partition in response.partitions]
[ "def", "partition_read", "(", "self", ",", "table", ",", "columns", ",", "keyset", ",", "index", "=", "\"\"", ",", "partition_size_bytes", "=", "None", ",", "max_partitions", "=", "None", ",", ")", ":", "if", "not", "self", ".", "_multi_use", ":", "raise", "ValueError", "(", "\"Cannot use single-use snapshot.\"", ")", "if", "self", ".", "_transaction_id", "is", "None", ":", "raise", "ValueError", "(", "\"Transaction not started.\"", ")", "database", "=", "self", ".", "_session", ".", "_database", "api", "=", "database", ".", "spanner_api", "metadata", "=", "_metadata_with_prefix", "(", "database", ".", "name", ")", "transaction", "=", "self", ".", "_make_txn_selector", "(", ")", "partition_options", "=", "PartitionOptions", "(", "partition_size_bytes", "=", "partition_size_bytes", ",", "max_partitions", "=", "max_partitions", ")", "response", "=", "api", ".", "partition_read", "(", "session", "=", "self", ".", "_session", ".", "name", ",", "table", "=", "table", ",", "columns", "=", "columns", ",", "key_set", "=", "keyset", ".", "_to_pb", "(", ")", ",", "transaction", "=", "transaction", ",", "index", "=", "index", ",", "partition_options", "=", "partition_options", ",", "metadata", "=", "metadata", ",", ")", "return", "[", "partition", ".", "partition_token", "for", "partition", "in", "response", ".", "partitions", "]" ]
Perform a ``ParitionRead`` API request for rows in a table. :type table: str :param table: name of the table from which to fetch data :type columns: list of str :param columns: names of columns to be retrieved :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` :param keyset: keys / ranges identifying rows to be retrieved :type index: str :param index: (Optional) name of index to use, rather than the table's primary key :type partition_size_bytes: int :param partition_size_bytes: (Optional) desired size for each partition generated. The service uses this as a hint, the actual partition size may differ. :type max_partitions: int :param max_partitions: (Optional) desired maximum number of partitions generated. The service uses this as a hint, the actual number of partitions may differ. :rtype: iterable of bytes :returns: a sequence of partition tokens :raises ValueError: for single-use snapshots, or if a transaction ID is already associtated with the snapshot.
[ "Perform", "a", "ParitionRead", "API", "request", "for", "rows", "in", "a", "table", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L239-L306
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
_SnapshotBase.partition_query
def partition_query( self, sql, params=None, param_types=None, partition_size_bytes=None, max_partitions=None, ): """Perform a ``ParitionQuery`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type partition_size_bytes: int :param partition_size_bytes: (Optional) desired size for each partition generated. The service uses this as a hint, the actual partition size may differ. :type max_partitions: int :param max_partitions: (Optional) desired maximum number of partitions generated. The service uses this as a hint, the actual number of partitions may differ. :rtype: iterable of bytes :returns: a sequence of partition tokens :raises ValueError: for single-use snapshots, or if a transaction ID is already associtated with the snapshot. """ if not self._multi_use: raise ValueError("Cannot use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction not started.") if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") params_pb = Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: params_pb = None database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() partition_options = PartitionOptions( partition_size_bytes=partition_size_bytes, max_partitions=max_partitions ) response = api.partition_query( session=self._session.name, sql=sql, transaction=transaction, params=params_pb, param_types=param_types, partition_options=partition_options, metadata=metadata, ) return [partition.partition_token for partition in response.partitions]
python
def partition_query( self, sql, params=None, param_types=None, partition_size_bytes=None, max_partitions=None, ): """Perform a ``ParitionQuery`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type partition_size_bytes: int :param partition_size_bytes: (Optional) desired size for each partition generated. The service uses this as a hint, the actual partition size may differ. :type max_partitions: int :param max_partitions: (Optional) desired maximum number of partitions generated. The service uses this as a hint, the actual number of partitions may differ. :rtype: iterable of bytes :returns: a sequence of partition tokens :raises ValueError: for single-use snapshots, or if a transaction ID is already associtated with the snapshot. """ if not self._multi_use: raise ValueError("Cannot use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction not started.") if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") params_pb = Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: params_pb = None database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() partition_options = PartitionOptions( partition_size_bytes=partition_size_bytes, max_partitions=max_partitions ) response = api.partition_query( session=self._session.name, sql=sql, transaction=transaction, params=params_pb, param_types=param_types, partition_options=partition_options, metadata=metadata, ) return [partition.partition_token for partition in response.partitions]
[ "def", "partition_query", "(", "self", ",", "sql", ",", "params", "=", "None", ",", "param_types", "=", "None", ",", "partition_size_bytes", "=", "None", ",", "max_partitions", "=", "None", ",", ")", ":", "if", "not", "self", ".", "_multi_use", ":", "raise", "ValueError", "(", "\"Cannot use single-use snapshot.\"", ")", "if", "self", ".", "_transaction_id", "is", "None", ":", "raise", "ValueError", "(", "\"Transaction not started.\"", ")", "if", "params", "is", "not", "None", ":", "if", "param_types", "is", "None", ":", "raise", "ValueError", "(", "\"Specify 'param_types' when passing 'params'.\"", ")", "params_pb", "=", "Struct", "(", "fields", "=", "{", "key", ":", "_make_value_pb", "(", "value", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", "}", ")", "else", ":", "params_pb", "=", "None", "database", "=", "self", ".", "_session", ".", "_database", "api", "=", "database", ".", "spanner_api", "metadata", "=", "_metadata_with_prefix", "(", "database", ".", "name", ")", "transaction", "=", "self", ".", "_make_txn_selector", "(", ")", "partition_options", "=", "PartitionOptions", "(", "partition_size_bytes", "=", "partition_size_bytes", ",", "max_partitions", "=", "max_partitions", ")", "response", "=", "api", ".", "partition_query", "(", "session", "=", "self", ".", "_session", ".", "name", ",", "sql", "=", "sql", ",", "transaction", "=", "transaction", ",", "params", "=", "params_pb", ",", "param_types", "=", "param_types", ",", "partition_options", "=", "partition_options", ",", "metadata", "=", "metadata", ",", ")", "return", "[", "partition", ".", "partition_token", "for", "partition", "in", "response", ".", "partitions", "]" ]
Perform a ``ParitionQuery`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type partition_size_bytes: int :param partition_size_bytes: (Optional) desired size for each partition generated. The service uses this as a hint, the actual partition size may differ. :type max_partitions: int :param max_partitions: (Optional) desired maximum number of partitions generated. The service uses this as a hint, the actual number of partitions may differ. :rtype: iterable of bytes :returns: a sequence of partition tokens :raises ValueError: for single-use snapshots, or if a transaction ID is already associtated with the snapshot.
[ "Perform", "a", "ParitionQuery", "API", "request", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L308-L381
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
Snapshot._make_txn_selector
def _make_txn_selector(self): """Helper for :meth:`read`.""" if self._transaction_id is not None: return TransactionSelector(id=self._transaction_id) if self._read_timestamp: key = "read_timestamp" value = _datetime_to_pb_timestamp(self._read_timestamp) elif self._min_read_timestamp: key = "min_read_timestamp" value = _datetime_to_pb_timestamp(self._min_read_timestamp) elif self._max_staleness: key = "max_staleness" value = _timedelta_to_duration_pb(self._max_staleness) elif self._exact_staleness: key = "exact_staleness" value = _timedelta_to_duration_pb(self._exact_staleness) else: key = "strong" value = True options = TransactionOptions( read_only=TransactionOptions.ReadOnly(**{key: value}) ) if self._multi_use: return TransactionSelector(begin=options) else: return TransactionSelector(single_use=options)
python
def _make_txn_selector(self): """Helper for :meth:`read`.""" if self._transaction_id is not None: return TransactionSelector(id=self._transaction_id) if self._read_timestamp: key = "read_timestamp" value = _datetime_to_pb_timestamp(self._read_timestamp) elif self._min_read_timestamp: key = "min_read_timestamp" value = _datetime_to_pb_timestamp(self._min_read_timestamp) elif self._max_staleness: key = "max_staleness" value = _timedelta_to_duration_pb(self._max_staleness) elif self._exact_staleness: key = "exact_staleness" value = _timedelta_to_duration_pb(self._exact_staleness) else: key = "strong" value = True options = TransactionOptions( read_only=TransactionOptions.ReadOnly(**{key: value}) ) if self._multi_use: return TransactionSelector(begin=options) else: return TransactionSelector(single_use=options)
[ "def", "_make_txn_selector", "(", "self", ")", ":", "if", "self", ".", "_transaction_id", "is", "not", "None", ":", "return", "TransactionSelector", "(", "id", "=", "self", ".", "_transaction_id", ")", "if", "self", ".", "_read_timestamp", ":", "key", "=", "\"read_timestamp\"", "value", "=", "_datetime_to_pb_timestamp", "(", "self", ".", "_read_timestamp", ")", "elif", "self", ".", "_min_read_timestamp", ":", "key", "=", "\"min_read_timestamp\"", "value", "=", "_datetime_to_pb_timestamp", "(", "self", ".", "_min_read_timestamp", ")", "elif", "self", ".", "_max_staleness", ":", "key", "=", "\"max_staleness\"", "value", "=", "_timedelta_to_duration_pb", "(", "self", ".", "_max_staleness", ")", "elif", "self", ".", "_exact_staleness", ":", "key", "=", "\"exact_staleness\"", "value", "=", "_timedelta_to_duration_pb", "(", "self", ".", "_exact_staleness", ")", "else", ":", "key", "=", "\"strong\"", "value", "=", "True", "options", "=", "TransactionOptions", "(", "read_only", "=", "TransactionOptions", ".", "ReadOnly", "(", "*", "*", "{", "key", ":", "value", "}", ")", ")", "if", "self", ".", "_multi_use", ":", "return", "TransactionSelector", "(", "begin", "=", "options", ")", "else", ":", "return", "TransactionSelector", "(", "single_use", "=", "options", ")" ]
Helper for :meth:`read`.
[ "Helper", "for", ":", "meth", ":", "read", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L449-L477
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/snapshot.py
Snapshot.begin
def begin(self): """Begin a read-only transaction on the database. :rtype: bytes :returns: the ID for the newly-begun transaction. :raises ValueError: if the transaction is already begun, committed, or rolled back. """ if not self._multi_use: raise ValueError("Cannot call 'begin' on single-use snapshots") if self._transaction_id is not None: raise ValueError("Read-only transaction already begun") if self._read_request_count > 0: raise ValueError("Read-only transaction already pending") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) txn_selector = self._make_txn_selector() response = api.begin_transaction( self._session.name, txn_selector.begin, metadata=metadata ) self._transaction_id = response.id return self._transaction_id
python
def begin(self): """Begin a read-only transaction on the database. :rtype: bytes :returns: the ID for the newly-begun transaction. :raises ValueError: if the transaction is already begun, committed, or rolled back. """ if not self._multi_use: raise ValueError("Cannot call 'begin' on single-use snapshots") if self._transaction_id is not None: raise ValueError("Read-only transaction already begun") if self._read_request_count > 0: raise ValueError("Read-only transaction already pending") database = self._session._database api = database.spanner_api metadata = _metadata_with_prefix(database.name) txn_selector = self._make_txn_selector() response = api.begin_transaction( self._session.name, txn_selector.begin, metadata=metadata ) self._transaction_id = response.id return self._transaction_id
[ "def", "begin", "(", "self", ")", ":", "if", "not", "self", ".", "_multi_use", ":", "raise", "ValueError", "(", "\"Cannot call 'begin' on single-use snapshots\"", ")", "if", "self", ".", "_transaction_id", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Read-only transaction already begun\"", ")", "if", "self", ".", "_read_request_count", ">", "0", ":", "raise", "ValueError", "(", "\"Read-only transaction already pending\"", ")", "database", "=", "self", ".", "_session", ".", "_database", "api", "=", "database", ".", "spanner_api", "metadata", "=", "_metadata_with_prefix", "(", "database", ".", "name", ")", "txn_selector", "=", "self", ".", "_make_txn_selector", "(", ")", "response", "=", "api", ".", "begin_transaction", "(", "self", ".", "_session", ".", "name", ",", "txn_selector", ".", "begin", ",", "metadata", "=", "metadata", ")", "self", ".", "_transaction_id", "=", "response", ".", "id", "return", "self", ".", "_transaction_id" ]
Begin a read-only transaction on the database. :rtype: bytes :returns: the ID for the newly-begun transaction. :raises ValueError: if the transaction is already begun, committed, or rolled back.
[ "Begin", "a", "read", "-", "only", "transaction", "on", "the", "database", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/snapshot.py#L479-L505
train
googleapis/google-cloud-python
api_core/google/api_core/gapic_v1/client_info.py
ClientInfo.to_user_agent
def to_user_agent(self): """Returns the user-agent string for this client info.""" # Note: the order here is important as the internal metrics system # expects these items to be in specific locations. ua = "" if self.user_agent is not None: ua += "{user_agent} " ua += "gl-python/{python_version} " if self.grpc_version is not None: ua += "grpc/{grpc_version} " ua += "gax/{api_core_version} " if self.gapic_version is not None: ua += "gapic/{gapic_version} " if self.client_library_version is not None: ua += "gccl/{client_library_version} " return ua.format(**self.__dict__).strip()
python
def to_user_agent(self): """Returns the user-agent string for this client info.""" # Note: the order here is important as the internal metrics system # expects these items to be in specific locations. ua = "" if self.user_agent is not None: ua += "{user_agent} " ua += "gl-python/{python_version} " if self.grpc_version is not None: ua += "grpc/{grpc_version} " ua += "gax/{api_core_version} " if self.gapic_version is not None: ua += "gapic/{gapic_version} " if self.client_library_version is not None: ua += "gccl/{client_library_version} " return ua.format(**self.__dict__).strip()
[ "def", "to_user_agent", "(", "self", ")", ":", "# Note: the order here is important as the internal metrics system", "# expects these items to be in specific locations.", "ua", "=", "\"\"", "if", "self", ".", "user_agent", "is", "not", "None", ":", "ua", "+=", "\"{user_agent} \"", "ua", "+=", "\"gl-python/{python_version} \"", "if", "self", ".", "grpc_version", "is", "not", "None", ":", "ua", "+=", "\"grpc/{grpc_version} \"", "ua", "+=", "\"gax/{api_core_version} \"", "if", "self", ".", "gapic_version", "is", "not", "None", ":", "ua", "+=", "\"gapic/{gapic_version} \"", "if", "self", ".", "client_library_version", "is", "not", "None", ":", "ua", "+=", "\"gccl/{client_library_version} \"", "return", "ua", ".", "format", "(", "*", "*", "self", ".", "__dict__", ")", ".", "strip", "(", ")" ]
Returns the user-agent string for this client info.
[ "Returns", "the", "user", "-", "agent", "string", "for", "this", "client", "info", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/gapic_v1/client_info.py#L75-L98
train
googleapis/google-cloud-python
bigquery_storage/google/cloud/bigquery_storage_v1beta1/client.py
BigQueryStorageClient.read_rows
def read_rows( self, read_position, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data. Example: >>> from google.cloud import bigquery_storage_v1beta1 >>> >>> client = bigquery_storage_v1beta1.BigQueryStorageClient() >>> >>> # TODO: Initialize ``table_reference``: >>> table_reference = { ... 'project_id': 'your-data-project-id', ... 'dataset_id': 'your_dataset_id', ... 'table_id': 'your_table_id', ... } >>> >>> # TODO: Initialize `parent`: >>> parent = 'projects/your-billing-project-id' >>> >>> session = client.create_read_session(table_reference, parent) >>> read_position = bigquery_storage_v1beta1.types.StreamPosition( ... stream=session.streams[0], # TODO: Read the other streams. ... ) >>> >>> for element in client.read_rows(read_position): ... # process element ... pass Args: read_position (Union[ \ dict, \ ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \ ]): Required. Identifier of the position in the stream to start reading from. The offset requested must be less than the last row read from ReadRows. Requesting a larger offset is undefined. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigquery_storage_v1beta1.types.StreamPosition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: ~google.cloud.bigquery_storage_v1beta1.reader.ReadRowsStream: An iterable of :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse`. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ gapic_client = super(BigQueryStorageClient, self) stream = gapic_client.read_rows( read_position, retry=retry, timeout=timeout, metadata=metadata ) return reader.ReadRowsStream( stream, gapic_client, read_position, {"retry": retry, "timeout": timeout, "metadata": metadata}, )
python
def read_rows( self, read_position, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data. Example: >>> from google.cloud import bigquery_storage_v1beta1 >>> >>> client = bigquery_storage_v1beta1.BigQueryStorageClient() >>> >>> # TODO: Initialize ``table_reference``: >>> table_reference = { ... 'project_id': 'your-data-project-id', ... 'dataset_id': 'your_dataset_id', ... 'table_id': 'your_table_id', ... } >>> >>> # TODO: Initialize `parent`: >>> parent = 'projects/your-billing-project-id' >>> >>> session = client.create_read_session(table_reference, parent) >>> read_position = bigquery_storage_v1beta1.types.StreamPosition( ... stream=session.streams[0], # TODO: Read the other streams. ... ) >>> >>> for element in client.read_rows(read_position): ... # process element ... pass Args: read_position (Union[ \ dict, \ ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \ ]): Required. Identifier of the position in the stream to start reading from. The offset requested must be less than the last row read from ReadRows. Requesting a larger offset is undefined. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigquery_storage_v1beta1.types.StreamPosition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: ~google.cloud.bigquery_storage_v1beta1.reader.ReadRowsStream: An iterable of :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse`. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ gapic_client = super(BigQueryStorageClient, self) stream = gapic_client.read_rows( read_position, retry=retry, timeout=timeout, metadata=metadata ) return reader.ReadRowsStream( stream, gapic_client, read_position, {"retry": retry, "timeout": timeout, "metadata": metadata}, )
[ "def", "read_rows", "(", "self", ",", "read_position", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "gapic_client", "=", "super", "(", "BigQueryStorageClient", ",", "self", ")", "stream", "=", "gapic_client", ".", "read_rows", "(", "read_position", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")", "return", "reader", ".", "ReadRowsStream", "(", "stream", ",", "gapic_client", ",", "read_position", ",", "{", "\"retry\"", ":", "retry", ",", "\"timeout\"", ":", "timeout", ",", "\"metadata\"", ":", "metadata", "}", ",", ")" ]
Reads rows from the table in the format prescribed by the read session. Each response contains one or more table rows, up to a maximum of 10 MiB per response; read requests which attempt to read individual rows larger than this will fail. Each request also returns a set of stream statistics reflecting the estimated total number of rows in the read stream. This number is computed based on the total table size and the number of active streams in the read session, and may change as other streams continue to read data. Example: >>> from google.cloud import bigquery_storage_v1beta1 >>> >>> client = bigquery_storage_v1beta1.BigQueryStorageClient() >>> >>> # TODO: Initialize ``table_reference``: >>> table_reference = { ... 'project_id': 'your-data-project-id', ... 'dataset_id': 'your_dataset_id', ... 'table_id': 'your_table_id', ... } >>> >>> # TODO: Initialize `parent`: >>> parent = 'projects/your-billing-project-id' >>> >>> session = client.create_read_session(table_reference, parent) >>> read_position = bigquery_storage_v1beta1.types.StreamPosition( ... stream=session.streams[0], # TODO: Read the other streams. ... ) >>> >>> for element in client.read_rows(read_position): ... # process element ... pass Args: read_position (Union[ \ dict, \ ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \ ]): Required. Identifier of the position in the stream to start reading from. The offset requested must be less than the last row read from ReadRows. Requesting a larger offset is undefined. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigquery_storage_v1beta1.types.StreamPosition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. 
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: ~google.cloud.bigquery_storage_v1beta1.reader.ReadRowsStream: An iterable of :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse`. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Reads", "rows", "from", "the", "table", "in", "the", "format", "prescribed", "by", "the", "read", "session", ".", "Each", "response", "contains", "one", "or", "more", "table", "rows", "up", "to", "a", "maximum", "of", "10", "MiB", "per", "response", ";", "read", "requests", "which", "attempt", "to", "read", "individual", "rows", "larger", "than", "this", "will", "fail", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery_storage/google/cloud/bigquery_storage_v1beta1/client.py#L42-L126
train
googleapis/google-cloud-python
trace/google/cloud/trace/v1/_gapic.py
make_trace_api
def make_trace_api(client): """ Create an instance of the gapic Trace API. Args: client (~google.cloud.trace.client.Client): The client that holds configuration details. Returns: A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the proper configurations. """ generated = trace_service_client.TraceServiceClient( credentials=client._credentials, client_info=_CLIENT_INFO ) return _TraceAPI(generated, client)
python
def make_trace_api(client): """ Create an instance of the gapic Trace API. Args: client (~google.cloud.trace.client.Client): The client that holds configuration details. Returns: A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the proper configurations. """ generated = trace_service_client.TraceServiceClient( credentials=client._credentials, client_info=_CLIENT_INFO ) return _TraceAPI(generated, client)
[ "def", "make_trace_api", "(", "client", ")", ":", "generated", "=", "trace_service_client", ".", "TraceServiceClient", "(", "credentials", "=", "client", ".", "_credentials", ",", "client_info", "=", "_CLIENT_INFO", ")", "return", "_TraceAPI", "(", "generated", ",", "client", ")" ]
Create an instance of the gapic Trace API. Args: client (~google.cloud.trace.client.Client): The client that holds configuration details. Returns: A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the proper configurations.
[ "Create", "an", "instance", "of", "the", "gapic", "Trace", "API", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/trace/google/cloud/trace/v1/_gapic.py#L168-L183
train
googleapis/google-cloud-python
trace/google/cloud/trace/v1/_gapic.py
_TraceAPI.patch_traces
def patch_traces(self, project_id, traces): """ Sends new traces to Stackdriver Trace or updates existing traces. Args: project_id (Optional[str]): ID of the Cloud project where the trace data is stored. traces (dict): Required. The traces to be patched in the API call. """ traces_pb = _traces_mapping_to_pb(traces) self._gapic_api.patch_traces(project_id, traces_pb)
python
def patch_traces(self, project_id, traces): """ Sends new traces to Stackdriver Trace or updates existing traces. Args: project_id (Optional[str]): ID of the Cloud project where the trace data is stored. traces (dict): Required. The traces to be patched in the API call. """ traces_pb = _traces_mapping_to_pb(traces) self._gapic_api.patch_traces(project_id, traces_pb)
[ "def", "patch_traces", "(", "self", ",", "project_id", ",", "traces", ")", ":", "traces_pb", "=", "_traces_mapping_to_pb", "(", "traces", ")", "self", ".", "_gapic_api", ".", "patch_traces", "(", "project_id", ",", "traces_pb", ")" ]
Sends new traces to Stackdriver Trace or updates existing traces. Args: project_id (Optional[str]): ID of the Cloud project where the trace data is stored. traces (dict): Required. The traces to be patched in the API call.
[ "Sends", "new", "traces", "to", "Stackdriver", "Trace", "or", "updates", "existing", "traces", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/trace/google/cloud/trace/v1/_gapic.py#L47-L57
train
googleapis/google-cloud-python
trace/google/cloud/trace/v1/_gapic.py
_TraceAPI.get_trace
def get_trace(self, project_id, trace_id): """ Gets a single trace by its ID. Args: trace_id (str): ID of the trace to return. project_id (str): Required. ID of the Cloud project where the trace data is stored. Returns: A Trace dict. """ trace_pb = self._gapic_api.get_trace(project_id, trace_id) trace_mapping = _parse_trace_pb(trace_pb) return trace_mapping
python
def get_trace(self, project_id, trace_id): """ Gets a single trace by its ID. Args: trace_id (str): ID of the trace to return. project_id (str): Required. ID of the Cloud project where the trace data is stored. Returns: A Trace dict. """ trace_pb = self._gapic_api.get_trace(project_id, trace_id) trace_mapping = _parse_trace_pb(trace_pb) return trace_mapping
[ "def", "get_trace", "(", "self", ",", "project_id", ",", "trace_id", ")", ":", "trace_pb", "=", "self", ".", "_gapic_api", ".", "get_trace", "(", "project_id", ",", "trace_id", ")", "trace_mapping", "=", "_parse_trace_pb", "(", "trace_pb", ")", "return", "trace_mapping" ]
Gets a single trace by its ID. Args: trace_id (str): ID of the trace to return. project_id (str): Required. ID of the Cloud project where the trace data is stored. Returns: A Trace dict.
[ "Gets", "a", "single", "trace", "by", "its", "ID", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/trace/google/cloud/trace/v1/_gapic.py#L59-L73
train
googleapis/google-cloud-python
trace/google/cloud/trace/v1/_gapic.py
_TraceAPI.list_traces
def list_traces( self, project_id, view=None, page_size=None, start_time=None, end_time=None, filter_=None, order_by=None, page_token=None, ): """ Returns of a list of traces that match the filter conditions. Args: project_id (Optional[str]): ID of the Cloud project where the trace data is stored. view (Optional[~google.cloud.trace_v1.gapic.enums. ListTracesRequest.ViewType]): Type of data returned for traces in the list. Default is ``MINIMAL``. page_size (Optional[int]): Maximum number of traces to return. If not specified or <= 0, the implementation selects a reasonable value. The implementation may return fewer traces than the requested page size. start_time (Optional[~datetime.datetime]): Start of the time interval (inclusive) during which the trace data was collected from the application. end_time (Optional[~datetime.datetime]): End of the time interval (inclusive) during which the trace data was collected from the application. filter_ (Optional[str]): An optional filter for the request. order_by (Optional[str]): Field used to sort the returned traces. page_token (Optional[str]): opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. Returns: A :class:`~google.api_core.page_iterator.Iterator` of traces that match the specified filter conditions. """ page_iter = self._gapic_api.list_traces( project_id=project_id, view=view, page_size=page_size, start_time=start_time, end_time=end_time, filter_=filter_, order_by=order_by, ) page_iter.item_to_value = _item_to_mapping page_iter.next_page_token = page_token return page_iter
python
def list_traces( self, project_id, view=None, page_size=None, start_time=None, end_time=None, filter_=None, order_by=None, page_token=None, ): """ Returns of a list of traces that match the filter conditions. Args: project_id (Optional[str]): ID of the Cloud project where the trace data is stored. view (Optional[~google.cloud.trace_v1.gapic.enums. ListTracesRequest.ViewType]): Type of data returned for traces in the list. Default is ``MINIMAL``. page_size (Optional[int]): Maximum number of traces to return. If not specified or <= 0, the implementation selects a reasonable value. The implementation may return fewer traces than the requested page size. start_time (Optional[~datetime.datetime]): Start of the time interval (inclusive) during which the trace data was collected from the application. end_time (Optional[~datetime.datetime]): End of the time interval (inclusive) during which the trace data was collected from the application. filter_ (Optional[str]): An optional filter for the request. order_by (Optional[str]): Field used to sort the returned traces. page_token (Optional[str]): opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. Returns: A :class:`~google.api_core.page_iterator.Iterator` of traces that match the specified filter conditions. """ page_iter = self._gapic_api.list_traces( project_id=project_id, view=view, page_size=page_size, start_time=start_time, end_time=end_time, filter_=filter_, order_by=order_by, ) page_iter.item_to_value = _item_to_mapping page_iter.next_page_token = page_token return page_iter
[ "def", "list_traces", "(", "self", ",", "project_id", ",", "view", "=", "None", ",", "page_size", "=", "None", ",", "start_time", "=", "None", ",", "end_time", "=", "None", ",", "filter_", "=", "None", ",", "order_by", "=", "None", ",", "page_token", "=", "None", ",", ")", ":", "page_iter", "=", "self", ".", "_gapic_api", ".", "list_traces", "(", "project_id", "=", "project_id", ",", "view", "=", "view", ",", "page_size", "=", "page_size", ",", "start_time", "=", "start_time", ",", "end_time", "=", "end_time", ",", "filter_", "=", "filter_", ",", "order_by", "=", "order_by", ",", ")", "page_iter", ".", "item_to_value", "=", "_item_to_mapping", "page_iter", ".", "next_page_token", "=", "page_token", "return", "page_iter" ]
Returns of a list of traces that match the filter conditions. Args: project_id (Optional[str]): ID of the Cloud project where the trace data is stored. view (Optional[~google.cloud.trace_v1.gapic.enums. ListTracesRequest.ViewType]): Type of data returned for traces in the list. Default is ``MINIMAL``. page_size (Optional[int]): Maximum number of traces to return. If not specified or <= 0, the implementation selects a reasonable value. The implementation may return fewer traces than the requested page size. start_time (Optional[~datetime.datetime]): Start of the time interval (inclusive) during which the trace data was collected from the application. end_time (Optional[~datetime.datetime]): End of the time interval (inclusive) during which the trace data was collected from the application. filter_ (Optional[str]): An optional filter for the request. order_by (Optional[str]): Field used to sort the returned traces. page_token (Optional[str]): opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. Returns: A :class:`~google.api_core.page_iterator.Iterator` of traces that match the specified filter conditions.
[ "Returns", "of", "a", "list", "of", "traces", "that", "match", "the", "filter", "conditions", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/trace/google/cloud/trace/v1/_gapic.py#L75-L133
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
_item_to_bucket
def _item_to_bucket(iterator, item): """Convert a JSON bucket to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that has retrieved the item. :type item: dict :param item: An item to be converted to a bucket. :rtype: :class:`.Bucket` :returns: The next bucket in the page. """ name = item.get("name") bucket = Bucket(iterator.client, name) bucket._set_properties(item) return bucket
python
def _item_to_bucket(iterator, item): """Convert a JSON bucket to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that has retrieved the item. :type item: dict :param item: An item to be converted to a bucket. :rtype: :class:`.Bucket` :returns: The next bucket in the page. """ name = item.get("name") bucket = Bucket(iterator.client, name) bucket._set_properties(item) return bucket
[ "def", "_item_to_bucket", "(", "iterator", ",", "item", ")", ":", "name", "=", "item", ".", "get", "(", "\"name\"", ")", "bucket", "=", "Bucket", "(", "iterator", ".", "client", ",", "name", ")", "bucket", ".", "_set_properties", "(", "item", ")", "return", "bucket" ]
Convert a JSON bucket to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that has retrieved the item. :type item: dict :param item: An item to be converted to a bucket. :rtype: :class:`.Bucket` :returns: The next bucket in the page.
[ "Convert", "a", "JSON", "bucket", "to", "the", "native", "object", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L374-L389
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
Client.create_anonymous_client
def create_anonymous_client(cls): """Factory: return client with anonymous credentials. .. note:: Such a client has only limited access to "public" buckets: listing their contents and downloading their blobs. :rtype: :class:`google.cloud.storage.client.Client` :returns: Instance w/ anonymous credentials and no project. """ client = cls(project="<none>", credentials=AnonymousCredentials()) client.project = None return client
python
def create_anonymous_client(cls): """Factory: return client with anonymous credentials. .. note:: Such a client has only limited access to "public" buckets: listing their contents and downloading their blobs. :rtype: :class:`google.cloud.storage.client.Client` :returns: Instance w/ anonymous credentials and no project. """ client = cls(project="<none>", credentials=AnonymousCredentials()) client.project = None return client
[ "def", "create_anonymous_client", "(", "cls", ")", ":", "client", "=", "cls", "(", "project", "=", "\"<none>\"", ",", "credentials", "=", "AnonymousCredentials", "(", ")", ")", "client", ".", "project", "=", "None", "return", "client" ]
Factory: return client with anonymous credentials. .. note:: Such a client has only limited access to "public" buckets: listing their contents and downloading their blobs. :rtype: :class:`google.cloud.storage.client.Client` :returns: Instance w/ anonymous credentials and no project.
[ "Factory", ":", "return", "client", "with", "anonymous", "credentials", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L81-L94
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
Client.get_service_account_email
def get_service_account_email(self, project=None): """Get the email address of the project's GCS service account :type project: str :param project: (Optional) Project ID to use for retreiving GCS service account email address. Defaults to the client's project. :rtype: str :returns: service account email address """ if project is None: project = self.project path = "/projects/%s/serviceAccount" % (project,) api_response = self._base_connection.api_request(method="GET", path=path) return api_response["email_address"]
python
def get_service_account_email(self, project=None): """Get the email address of the project's GCS service account :type project: str :param project: (Optional) Project ID to use for retreiving GCS service account email address. Defaults to the client's project. :rtype: str :returns: service account email address """ if project is None: project = self.project path = "/projects/%s/serviceAccount" % (project,) api_response = self._base_connection.api_request(method="GET", path=path) return api_response["email_address"]
[ "def", "get_service_account_email", "(", "self", ",", "project", "=", "None", ")", ":", "if", "project", "is", "None", ":", "project", "=", "self", ".", "project", "path", "=", "\"/projects/%s/serviceAccount\"", "%", "(", "project", ",", ")", "api_response", "=", "self", ".", "_base_connection", ".", "api_request", "(", "method", "=", "\"GET\"", ",", "path", "=", "path", ")", "return", "api_response", "[", "\"email_address\"", "]" ]
Get the email address of the project's GCS service account :type project: str :param project: (Optional) Project ID to use for retreiving GCS service account email address. Defaults to the client's project. :rtype: str :returns: service account email address
[ "Get", "the", "email", "address", "of", "the", "project", "s", "GCS", "service", "account" ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L157-L172
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
Client.bucket
def bucket(self, bucket_name, user_project=None): """Factory constructor for bucket object. .. note:: This will not make an HTTP request; it simply instantiates a bucket object owned by this client. :type bucket_name: str :param bucket_name: The name of the bucket to be instantiated. :type user_project: str :param user_project: (Optional) the project ID to be billed for API requests made via the bucket. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket object created. """ return Bucket(client=self, name=bucket_name, user_project=user_project)
python
def bucket(self, bucket_name, user_project=None): """Factory constructor for bucket object. .. note:: This will not make an HTTP request; it simply instantiates a bucket object owned by this client. :type bucket_name: str :param bucket_name: The name of the bucket to be instantiated. :type user_project: str :param user_project: (Optional) the project ID to be billed for API requests made via the bucket. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket object created. """ return Bucket(client=self, name=bucket_name, user_project=user_project)
[ "def", "bucket", "(", "self", ",", "bucket_name", ",", "user_project", "=", "None", ")", ":", "return", "Bucket", "(", "client", "=", "self", ",", "name", "=", "bucket_name", ",", "user_project", "=", "user_project", ")" ]
Factory constructor for bucket object. .. note:: This will not make an HTTP request; it simply instantiates a bucket object owned by this client. :type bucket_name: str :param bucket_name: The name of the bucket to be instantiated. :type user_project: str :param user_project: (Optional) the project ID to be billed for API requests made via the bucket. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket object created.
[ "Factory", "constructor", "for", "bucket", "object", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L174-L191
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
Client.get_bucket
def get_bucket(self, bucket_name): """Get a bucket by name. If the bucket isn't found, this will raise a :class:`google.cloud.exceptions.NotFound`. For example: .. literalinclude:: snippets.py :start-after: [START get_bucket] :end-before: [END get_bucket] This implements "storage.buckets.get". :type bucket_name: str :param bucket_name: The name of the bucket to get. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket matching the name provided. :raises: :class:`google.cloud.exceptions.NotFound` """ bucket = Bucket(self, name=bucket_name) bucket.reload(client=self) return bucket
python
def get_bucket(self, bucket_name): """Get a bucket by name. If the bucket isn't found, this will raise a :class:`google.cloud.exceptions.NotFound`. For example: .. literalinclude:: snippets.py :start-after: [START get_bucket] :end-before: [END get_bucket] This implements "storage.buckets.get". :type bucket_name: str :param bucket_name: The name of the bucket to get. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket matching the name provided. :raises: :class:`google.cloud.exceptions.NotFound` """ bucket = Bucket(self, name=bucket_name) bucket.reload(client=self) return bucket
[ "def", "get_bucket", "(", "self", ",", "bucket_name", ")", ":", "bucket", "=", "Bucket", "(", "self", ",", "name", "=", "bucket_name", ")", "bucket", ".", "reload", "(", "client", "=", "self", ")", "return", "bucket" ]
Get a bucket by name. If the bucket isn't found, this will raise a :class:`google.cloud.exceptions.NotFound`. For example: .. literalinclude:: snippets.py :start-after: [START get_bucket] :end-before: [END get_bucket] This implements "storage.buckets.get". :type bucket_name: str :param bucket_name: The name of the bucket to get. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The bucket matching the name provided. :raises: :class:`google.cloud.exceptions.NotFound`
[ "Get", "a", "bucket", "by", "name", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L205-L228
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
Client.create_bucket
def create_bucket(self, bucket_name, requester_pays=None, project=None): """Create a new bucket. For example: .. literalinclude:: snippets.py :start-after: [START create_bucket] :end-before: [END create_bucket] This implements "storage.buckets.insert". If the bucket already exists, will raise :class:`google.cloud.exceptions.Conflict`. To set additional properties when creating a bucket, such as the bucket location, use :meth:`~.Bucket.create`. :type bucket_name: str :param bucket_name: The bucket name to create. :type requester_pays: bool :param requester_pays: (Optional) Whether requester pays for API requests for this bucket and its blobs. :type project: str :param project: (Optional) the project under which the bucket is to be created. If not passed, uses the project set on the client. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The newly created bucket. """ bucket = Bucket(self, name=bucket_name) if requester_pays is not None: bucket.requester_pays = requester_pays bucket.create(client=self, project=project) return bucket
python
def create_bucket(self, bucket_name, requester_pays=None, project=None): """Create a new bucket. For example: .. literalinclude:: snippets.py :start-after: [START create_bucket] :end-before: [END create_bucket] This implements "storage.buckets.insert". If the bucket already exists, will raise :class:`google.cloud.exceptions.Conflict`. To set additional properties when creating a bucket, such as the bucket location, use :meth:`~.Bucket.create`. :type bucket_name: str :param bucket_name: The bucket name to create. :type requester_pays: bool :param requester_pays: (Optional) Whether requester pays for API requests for this bucket and its blobs. :type project: str :param project: (Optional) the project under which the bucket is to be created. If not passed, uses the project set on the client. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The newly created bucket. """ bucket = Bucket(self, name=bucket_name) if requester_pays is not None: bucket.requester_pays = requester_pays bucket.create(client=self, project=project) return bucket
[ "def", "create_bucket", "(", "self", ",", "bucket_name", ",", "requester_pays", "=", "None", ",", "project", "=", "None", ")", ":", "bucket", "=", "Bucket", "(", "self", ",", "name", "=", "bucket_name", ")", "if", "requester_pays", "is", "not", "None", ":", "bucket", ".", "requester_pays", "=", "requester_pays", "bucket", ".", "create", "(", "client", "=", "self", ",", "project", "=", "project", ")", "return", "bucket" ]
Create a new bucket. For example: .. literalinclude:: snippets.py :start-after: [START create_bucket] :end-before: [END create_bucket] This implements "storage.buckets.insert". If the bucket already exists, will raise :class:`google.cloud.exceptions.Conflict`. To set additional properties when creating a bucket, such as the bucket location, use :meth:`~.Bucket.create`. :type bucket_name: str :param bucket_name: The bucket name to create. :type requester_pays: bool :param requester_pays: (Optional) Whether requester pays for API requests for this bucket and its blobs. :type project: str :param project: (Optional) the project under which the bucket is to be created. If not passed, uses the project set on the client. :rtype: :class:`google.cloud.storage.bucket.Bucket` :returns: The newly created bucket.
[ "Create", "a", "new", "bucket", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L251-L288
train
googleapis/google-cloud-python
storage/google/cloud/storage/client.py
Client.list_buckets
def list_buckets( self, max_results=None, page_token=None, prefix=None, projection="noAcl", fields=None, project=None, ): """Get all buckets in the project associated to the client. This will not populate the list of blobs available in each bucket. .. literalinclude:: snippets.py :start-after: [START list_buckets] :end-before: [END list_buckets] This implements "storage.buckets.list". :type max_results: int :param max_results: Optional. The maximum number of buckets to return. :type page_token: str :param page_token: Optional. If present, return the next batch of buckets, using the value, which must correspond to the ``nextPageToken`` value returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. :type prefix: str :param prefix: Optional. Filter results to buckets whose names begin with this prefix. :type projection: str :param projection: (Optional) Specifies the set of properties to return. If used, must be 'full' or 'noAcl'. Defaults to 'noAcl'. :type fields: str :param fields: (Optional) Selector specifying which fields to include in a partial response. Must be a list of fields. For example to get a partial response with just the next page token and the language of each bucket returned: 'items/id,nextPageToken' :type project: str :param project: (Optional) the project whose buckets are to be listed. If not passed, uses the project set on the client. :rtype: :class:`~google.api_core.page_iterator.Iterator` :raises ValueError: if both ``project`` is ``None`` and the client's project is also ``None``. :returns: Iterator of all :class:`~google.cloud.storage.bucket.Bucket` belonging to this project. 
""" if project is None: project = self.project if project is None: raise ValueError("Client project not set: pass an explicit project.") extra_params = {"project": project} if prefix is not None: extra_params["prefix"] = prefix extra_params["projection"] = projection if fields is not None: extra_params["fields"] = fields return page_iterator.HTTPIterator( client=self, api_request=self._connection.api_request, path="/b", item_to_value=_item_to_bucket, page_token=page_token, max_results=max_results, extra_params=extra_params, )
python
def list_buckets( self, max_results=None, page_token=None, prefix=None, projection="noAcl", fields=None, project=None, ): """Get all buckets in the project associated to the client. This will not populate the list of blobs available in each bucket. .. literalinclude:: snippets.py :start-after: [START list_buckets] :end-before: [END list_buckets] This implements "storage.buckets.list". :type max_results: int :param max_results: Optional. The maximum number of buckets to return. :type page_token: str :param page_token: Optional. If present, return the next batch of buckets, using the value, which must correspond to the ``nextPageToken`` value returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. :type prefix: str :param prefix: Optional. Filter results to buckets whose names begin with this prefix. :type projection: str :param projection: (Optional) Specifies the set of properties to return. If used, must be 'full' or 'noAcl'. Defaults to 'noAcl'. :type fields: str :param fields: (Optional) Selector specifying which fields to include in a partial response. Must be a list of fields. For example to get a partial response with just the next page token and the language of each bucket returned: 'items/id,nextPageToken' :type project: str :param project: (Optional) the project whose buckets are to be listed. If not passed, uses the project set on the client. :rtype: :class:`~google.api_core.page_iterator.Iterator` :raises ValueError: if both ``project`` is ``None`` and the client's project is also ``None``. :returns: Iterator of all :class:`~google.cloud.storage.bucket.Bucket` belonging to this project. 
""" if project is None: project = self.project if project is None: raise ValueError("Client project not set: pass an explicit project.") extra_params = {"project": project} if prefix is not None: extra_params["prefix"] = prefix extra_params["projection"] = projection if fields is not None: extra_params["fields"] = fields return page_iterator.HTTPIterator( client=self, api_request=self._connection.api_request, path="/b", item_to_value=_item_to_bucket, page_token=page_token, max_results=max_results, extra_params=extra_params, )
[ "def", "list_buckets", "(", "self", ",", "max_results", "=", "None", ",", "page_token", "=", "None", ",", "prefix", "=", "None", ",", "projection", "=", "\"noAcl\"", ",", "fields", "=", "None", ",", "project", "=", "None", ",", ")", ":", "if", "project", "is", "None", ":", "project", "=", "self", ".", "project", "if", "project", "is", "None", ":", "raise", "ValueError", "(", "\"Client project not set: pass an explicit project.\"", ")", "extra_params", "=", "{", "\"project\"", ":", "project", "}", "if", "prefix", "is", "not", "None", ":", "extra_params", "[", "\"prefix\"", "]", "=", "prefix", "extra_params", "[", "\"projection\"", "]", "=", "projection", "if", "fields", "is", "not", "None", ":", "extra_params", "[", "\"fields\"", "]", "=", "fields", "return", "page_iterator", ".", "HTTPIterator", "(", "client", "=", "self", ",", "api_request", "=", "self", ".", "_connection", ".", "api_request", ",", "path", "=", "\"/b\"", ",", "item_to_value", "=", "_item_to_bucket", ",", "page_token", "=", "page_token", ",", "max_results", "=", "max_results", ",", "extra_params", "=", "extra_params", ",", ")" ]
Get all buckets in the project associated to the client. This will not populate the list of blobs available in each bucket. .. literalinclude:: snippets.py :start-after: [START list_buckets] :end-before: [END list_buckets] This implements "storage.buckets.list". :type max_results: int :param max_results: Optional. The maximum number of buckets to return. :type page_token: str :param page_token: Optional. If present, return the next batch of buckets, using the value, which must correspond to the ``nextPageToken`` value returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. :type prefix: str :param prefix: Optional. Filter results to buckets whose names begin with this prefix. :type projection: str :param projection: (Optional) Specifies the set of properties to return. If used, must be 'full' or 'noAcl'. Defaults to 'noAcl'. :type fields: str :param fields: (Optional) Selector specifying which fields to include in a partial response. Must be a list of fields. For example to get a partial response with just the next page token and the language of each bucket returned: 'items/id,nextPageToken' :type project: str :param project: (Optional) the project whose buckets are to be listed. If not passed, uses the project set on the client. :rtype: :class:`~google.api_core.page_iterator.Iterator` :raises ValueError: if both ``project`` is ``None`` and the client's project is also ``None``. :returns: Iterator of all :class:`~google.cloud.storage.bucket.Bucket` belonging to this project.
[ "Get", "all", "buckets", "in", "the", "project", "associated", "to", "the", "client", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/client.py#L290-L371
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
_make_job_id
def _make_job_id(job_id, prefix=None): """Construct an ID for a new job. :type job_id: str or ``NoneType`` :param job_id: the user-provided job ID :type prefix: str or ``NoneType`` :param prefix: (Optional) the user-provided prefix for a job ID :rtype: str :returns: A job ID """ if job_id is not None: return job_id elif prefix is not None: return str(prefix) + str(uuid.uuid4()) else: return str(uuid.uuid4())
python
def _make_job_id(job_id, prefix=None): """Construct an ID for a new job. :type job_id: str or ``NoneType`` :param job_id: the user-provided job ID :type prefix: str or ``NoneType`` :param prefix: (Optional) the user-provided prefix for a job ID :rtype: str :returns: A job ID """ if job_id is not None: return job_id elif prefix is not None: return str(prefix) + str(uuid.uuid4()) else: return str(uuid.uuid4())
[ "def", "_make_job_id", "(", "job_id", ",", "prefix", "=", "None", ")", ":", "if", "job_id", "is", "not", "None", ":", "return", "job_id", "elif", "prefix", "is", "not", "None", ":", "return", "str", "(", "prefix", ")", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "else", ":", "return", "str", "(", "uuid", ".", "uuid4", "(", ")", ")" ]
Construct an ID for a new job. :type job_id: str or ``NoneType`` :param job_id: the user-provided job ID :type prefix: str or ``NoneType`` :param prefix: (Optional) the user-provided prefix for a job ID :rtype: str :returns: A job ID
[ "Construct", "an", "ID", "for", "a", "new", "job", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L2059-L2076
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
_check_mode
def _check_mode(stream): """Check that a stream was opened in read-binary mode. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute and is not among ``rb``, ``r+b`` or ``rb+``. """ mode = getattr(stream, "mode", None) if isinstance(stream, gzip.GzipFile): if mode != gzip.READ: raise ValueError( "Cannot upload gzip files opened in write mode: use " "gzip.GzipFile(filename, mode='rb')" ) else: if mode is not None and mode not in ("rb", "r+b", "rb+"): raise ValueError( "Cannot upload files opened in text mode: use " "open(filename, mode='rb') or open(filename, mode='r+b')" )
python
def _check_mode(stream): """Check that a stream was opened in read-binary mode. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute and is not among ``rb``, ``r+b`` or ``rb+``. """ mode = getattr(stream, "mode", None) if isinstance(stream, gzip.GzipFile): if mode != gzip.READ: raise ValueError( "Cannot upload gzip files opened in write mode: use " "gzip.GzipFile(filename, mode='rb')" ) else: if mode is not None and mode not in ("rb", "r+b", "rb+"): raise ValueError( "Cannot upload files opened in text mode: use " "open(filename, mode='rb') or open(filename, mode='r+b')" )
[ "def", "_check_mode", "(", "stream", ")", ":", "mode", "=", "getattr", "(", "stream", ",", "\"mode\"", ",", "None", ")", "if", "isinstance", "(", "stream", ",", "gzip", ".", "GzipFile", ")", ":", "if", "mode", "!=", "gzip", ".", "READ", ":", "raise", "ValueError", "(", "\"Cannot upload gzip files opened in write mode: use \"", "\"gzip.GzipFile(filename, mode='rb')\"", ")", "else", ":", "if", "mode", "is", "not", "None", "and", "mode", "not", "in", "(", "\"rb\"", ",", "\"r+b\"", ",", "\"rb+\"", ")", ":", "raise", "ValueError", "(", "\"Cannot upload files opened in text mode: use \"", "\"open(filename, mode='rb') or open(filename, mode='r+b')\"", ")" ]
Check that a stream was opened in read-binary mode. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute and is not among ``rb``, ``r+b`` or ``rb+``.
[ "Check", "that", "a", "stream", "was", "opened", "in", "read", "-", "binary", "mode", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L2079-L2101
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
Client.get_service_account_email
def get_service_account_email(self, project=None): """Get the email address of the project's BigQuery service account Note: This is the service account that BigQuery uses to manage tables encrypted by a key in KMS. Args: project (str, optional): Project ID to use for retreiving service account email. Defaults to the client's project. Returns: str: service account email address Example: >>> from google.cloud import bigquery >>> client = bigquery.Client() >>> client.get_service_account_email() my_service_account@my-project.iam.gserviceaccount.com """ if project is None: project = self.project path = "/projects/%s/serviceAccount" % (project,) api_response = self._connection.api_request(method="GET", path=path) return api_response["email"]
python
def get_service_account_email(self, project=None): """Get the email address of the project's BigQuery service account Note: This is the service account that BigQuery uses to manage tables encrypted by a key in KMS. Args: project (str, optional): Project ID to use for retreiving service account email. Defaults to the client's project. Returns: str: service account email address Example: >>> from google.cloud import bigquery >>> client = bigquery.Client() >>> client.get_service_account_email() my_service_account@my-project.iam.gserviceaccount.com """ if project is None: project = self.project path = "/projects/%s/serviceAccount" % (project,) api_response = self._connection.api_request(method="GET", path=path) return api_response["email"]
[ "def", "get_service_account_email", "(", "self", ",", "project", "=", "None", ")", ":", "if", "project", "is", "None", ":", "project", "=", "self", ".", "project", "path", "=", "\"/projects/%s/serviceAccount\"", "%", "(", "project", ",", ")", "api_response", "=", "self", ".", "_connection", ".", "api_request", "(", "method", "=", "\"GET\"", ",", "path", "=", "path", ")", "return", "api_response", "[", "\"email\"", "]" ]
Get the email address of the project's BigQuery service account Note: This is the service account that BigQuery uses to manage tables encrypted by a key in KMS. Args: project (str, optional): Project ID to use for retreiving service account email. Defaults to the client's project. Returns: str: service account email address Example: >>> from google.cloud import bigquery >>> client = bigquery.Client() >>> client.get_service_account_email() my_service_account@my-project.iam.gserviceaccount.com
[ "Get", "the", "email", "address", "of", "the", "project", "s", "BigQuery", "service", "account" ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L164-L191
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
Client.list_projects
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY): """List projects for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list :type max_results: int :param max_results: (Optional) maximum number of projects to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: (Optional) Token representing a cursor into the projects. If not passed, the API will return the first page of projects. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.bigquery.client.Project` accessible to the current client. """ return page_iterator.HTTPIterator( client=self, api_request=functools.partial(self._call_api, retry), path="/projects", item_to_value=_item_to_project, items_key="projects", page_token=page_token, max_results=max_results, )
python
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY): """List projects for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list :type max_results: int :param max_results: (Optional) maximum number of projects to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: (Optional) Token representing a cursor into the projects. If not passed, the API will return the first page of projects. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.bigquery.client.Project` accessible to the current client. """ return page_iterator.HTTPIterator( client=self, api_request=functools.partial(self._call_api, retry), path="/projects", item_to_value=_item_to_project, items_key="projects", page_token=page_token, max_results=max_results, )
[ "def", "list_projects", "(", "self", ",", "max_results", "=", "None", ",", "page_token", "=", "None", ",", "retry", "=", "DEFAULT_RETRY", ")", ":", "return", "page_iterator", ".", "HTTPIterator", "(", "client", "=", "self", ",", "api_request", "=", "functools", ".", "partial", "(", "self", ".", "_call_api", ",", "retry", ")", ",", "path", "=", "\"/projects\"", ",", "item_to_value", "=", "_item_to_project", ",", "items_key", "=", "\"projects\"", ",", "page_token", "=", "page_token", ",", "max_results", "=", "max_results", ",", ")" ]
List projects for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list :type max_results: int :param max_results: (Optional) maximum number of projects to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: (Optional) Token representing a cursor into the projects. If not passed, the API will return the first page of projects. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.bigquery.client.Project` accessible to the current client.
[ "List", "projects", "for", "the", "project", "associated", "with", "this", "client", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L193-L227
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
Client.list_datasets
def list_datasets( self, project=None, include_all=False, filter=None, max_results=None, page_token=None, retry=DEFAULT_RETRY, ): """List datasets for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list Args: project (str): Optional. Project ID to use for retreiving datasets. Defaults to the client's project. include_all (bool): Optional. True if results include hidden datasets. Defaults to False. filter (str): Optional. An expression for filtering the results by label. For syntax, see https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter. max_results (int): Optional. Maximum number of datasets to return. page_token (str): Optional. Token representing a cursor into the datasets. If not passed, the API will return the first page of datasets. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. retry (google.api_core.retry.Retry): Optional. How to retry the RPC. Returns: google.api_core.page_iterator.Iterator: Iterator of :class:`~google.cloud.bigquery.dataset.DatasetListItem`. associated with the project. """ extra_params = {} if project is None: project = self.project if include_all: extra_params["all"] = True if filter: # TODO: consider supporting a dict of label -> value for filter, # and converting it into a string here. extra_params["filter"] = filter path = "/projects/%s/datasets" % (project,) return page_iterator.HTTPIterator( client=self, api_request=functools.partial(self._call_api, retry), path=path, item_to_value=_item_to_dataset, items_key="datasets", page_token=page_token, max_results=max_results, extra_params=extra_params, )
python
def list_datasets( self, project=None, include_all=False, filter=None, max_results=None, page_token=None, retry=DEFAULT_RETRY, ): """List datasets for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list Args: project (str): Optional. Project ID to use for retreiving datasets. Defaults to the client's project. include_all (bool): Optional. True if results include hidden datasets. Defaults to False. filter (str): Optional. An expression for filtering the results by label. For syntax, see https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter. max_results (int): Optional. Maximum number of datasets to return. page_token (str): Optional. Token representing a cursor into the datasets. If not passed, the API will return the first page of datasets. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. retry (google.api_core.retry.Retry): Optional. How to retry the RPC. Returns: google.api_core.page_iterator.Iterator: Iterator of :class:`~google.cloud.bigquery.dataset.DatasetListItem`. associated with the project. """ extra_params = {} if project is None: project = self.project if include_all: extra_params["all"] = True if filter: # TODO: consider supporting a dict of label -> value for filter, # and converting it into a string here. extra_params["filter"] = filter path = "/projects/%s/datasets" % (project,) return page_iterator.HTTPIterator( client=self, api_request=functools.partial(self._call_api, retry), path=path, item_to_value=_item_to_dataset, items_key="datasets", page_token=page_token, max_results=max_results, extra_params=extra_params, )
[ "def", "list_datasets", "(", "self", ",", "project", "=", "None", ",", "include_all", "=", "False", ",", "filter", "=", "None", ",", "max_results", "=", "None", ",", "page_token", "=", "None", ",", "retry", "=", "DEFAULT_RETRY", ",", ")", ":", "extra_params", "=", "{", "}", "if", "project", "is", "None", ":", "project", "=", "self", ".", "project", "if", "include_all", ":", "extra_params", "[", "\"all\"", "]", "=", "True", "if", "filter", ":", "# TODO: consider supporting a dict of label -> value for filter,", "# and converting it into a string here.", "extra_params", "[", "\"filter\"", "]", "=", "filter", "path", "=", "\"/projects/%s/datasets\"", "%", "(", "project", ",", ")", "return", "page_iterator", ".", "HTTPIterator", "(", "client", "=", "self", ",", "api_request", "=", "functools", ".", "partial", "(", "self", ".", "_call_api", ",", "retry", ")", ",", "path", "=", "path", ",", "item_to_value", "=", "_item_to_dataset", ",", "items_key", "=", "\"datasets\"", ",", "page_token", "=", "page_token", ",", "max_results", "=", "max_results", ",", "extra_params", "=", "extra_params", ",", ")" ]
List datasets for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list Args: project (str): Optional. Project ID to use for retreiving datasets. Defaults to the client's project. include_all (bool): Optional. True if results include hidden datasets. Defaults to False. filter (str): Optional. An expression for filtering the results by label. For syntax, see https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter. max_results (int): Optional. Maximum number of datasets to return. page_token (str): Optional. Token representing a cursor into the datasets. If not passed, the API will return the first page of datasets. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. retry (google.api_core.retry.Retry): Optional. How to retry the RPC. Returns: google.api_core.page_iterator.Iterator: Iterator of :class:`~google.cloud.bigquery.dataset.DatasetListItem`. associated with the project.
[ "List", "datasets", "for", "the", "project", "associated", "with", "this", "client", "." ]
85e80125a59cb10f8cb105f25ecc099e4b940b50
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L229-L291
train