| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 31-13.1k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q22400 | _zlib_no_compress | train | def _zlib_no_compress(data):
"""Compress data with zlib level 0."""
cobj = zlib.compressobj(0)
| python | {
"resource": ""
} |
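The `_zlib_no_compress` row above is truncated right after the compressor is created. A minimal sketch of how such a helper can plausibly finish, using only the stdlib `zlib` API (the `flush()` call and the round-trip check below are assumptions, not text recovered from the row):

```python
import zlib

def zlib_no_compress(data):
    """Wrap data in a zlib stream at level 0 (stored blocks, no compression)."""
    cobj = zlib.compressobj(0)  # level 0: data is framed but not compressed
    return cobj.compress(data) + cobj.flush()

# Level-0 output is still a valid zlib stream and round-trips unchanged.
assert zlib.decompress(zlib_no_compress(b"hello")) == b"hello"
```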
q22401 | _parse_codec_options | train | def _parse_codec_options(options):
"""Parse BSON codec options."""
return CodecOptions(
document_class=options.get(
'document_class', DEFAULT_CODEC_OPTIONS.document_class),
tz_aware=options.get(
'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware),
uuid_representation=options.get(
'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation),
unicode_decode_error_handler=options.get(
| python | {
"resource": ""
} |
q22402 | CodecOptions._arguments_repr | train | def _arguments_repr(self):
"""Representation of the arguments used to create this object."""
document_class_repr = (
'dict' if self.document_class is dict
else repr(self.document_class))
uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
self.uuid_representation)
return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
| python | {
"resource": ""
} |
q22403 | _merge_command | train | def _merge_command(run, full_result, offset, result):
"""Merge a write command result into the full bulk result.
"""
affected = result.get("n", 0)
if run.op_type == _INSERT:
full_result["nInserted"] += affected
elif run.op_type == _DELETE:
full_result["nRemoved"] += affected
elif run.op_type == _UPDATE:
upserted = result.get("upserted")
if upserted:
n_upserted = len(upserted)
for doc in upserted:
doc["index"] = run.index(doc["index"] + offset)
full_result["upserted"].extend(upserted)
full_result["nUpserted"] += n_upserted
full_result["nMatched"] += (affected - n_upserted)
else:
full_result["nMatched"] += affected
full_result["nModified"] += result["nModified"]
write_errors | python | {
"resource": ""
} |
q22404 | _raise_bulk_write_error | train | def _raise_bulk_write_error(full_result):
"""Raise a BulkWriteError from the full bulk api result.
"""
if full_result["writeErrors"]:
full_result["writeErrors"].sort(
| python | {
"resource": ""
} |
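The `sort(` call in the row above is cut off. A plausible completion, assuming the errors are ordered by the index of the operation that failed (the `key` lambda and the final `raise` are inferred rather than recovered from the row):

```python
from pymongo.errors import BulkWriteError

def raise_bulk_write_error(full_result):
    """Raise BulkWriteError with writeErrors sorted by operation index."""
    if full_result["writeErrors"]:
        full_result["writeErrors"].sort(key=lambda doc: doc["index"])
    raise BulkWriteError(full_result)
```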
q22405 | _Bulk.add_update | train | def add_update(self, selector, update, multi=False, upsert=False,
collation=None, array_filters=None):
"""Create an update document and add it to the list of ops.
"""
validate_ok_for_update(update)
cmd = SON([('q', selector), ('u', update),
('multi', multi), ('upsert', upsert)])
collation = validate_collation_or_none(collation)
if collation is not None:
self.uses_collation = True
cmd['collation'] = collation
if array_filters is not None:
| python | {
"resource": ""
} |
q22406 | _Bulk.execute_insert_no_results | train | def execute_insert_no_results(self, sock_info, run, op_id, acknowledged):
"""Execute insert, returning no results.
"""
command = SON([('insert', self.collection.name),
('ordered', self.ordered)])
concern = {'w': int(self.ordered)}
command['writeConcern'] = concern
if self.bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
db = self.collection.database
| python | {
"resource": ""
} |
q22407 | _Bulk.execute_op_msg_no_results | train | def execute_op_msg_no_results(self, sock_info, generator):
"""Execute write commands with OP_MSG and w=0 writeConcern, unordered.
"""
db_name = self.collection.database.name
client = self.collection.database.client
listeners = client._event_listeners
op_id = _randint()
if not self.current_run:
self.current_run = next(generator)
run = self.current_run
while run:
cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
('ordered', False),
('writeConcern', {'w': 0})])
bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
listeners, None)
while run.idx_offset < len(run.ops):
check_keys = run.op_type == _INSERT
ops = islice(run.ops, run.idx_offset, None)
# Run as many ops as possible.
request_id, msg, to_send = _do_bulk_write_command(
self.namespace, run.op_type, cmd, ops, check_keys,
| python | {
"resource": ""
} |
q22408 | _Bulk.execute_command_no_results | train | def execute_command_no_results(self, sock_info, generator):
"""Execute write commands with OP_MSG and w=0 WriteConcern, ordered.
"""
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nModified": 0,
"nRemoved": 0,
"upserted": [],
}
# Ordered bulk writes have to be acknowledged so that we stop
# processing at the first error, even | python | {
"resource": ""
} |
q22409 | Server.run_operation_with_response | train | def run_operation_with_response(
self,
sock_info,
operation,
set_slave_okay,
listeners,
exhaust,
unpack_res):
"""Run a _Query or _GetMore operation and return a Response object.
This method is used only to run _Query/_GetMore operations from
cursors.
Can raise ConnectionFailure, OperationFailure, etc.
:Parameters:
- `operation`: A _Query or _GetMore object.
- `set_slave_okay`: Pass to operation.get_message.
- `listeners`: Instance of _EventListeners or None.
- `exhaust`: If True, then this is an exhaust cursor operation.
- `unpack_res`: A callable that decodes the wire protocol response.
"""
duration = None
publish = listeners.enabled_for_commands
if publish:
start = datetime.now()
send_message = not operation.exhaust_mgr
if send_message:
use_cmd = operation.use_command(sock_info, exhaust)
message = operation.get_message(
set_slave_okay, sock_info, use_cmd)
request_id, data, max_doc_size = self._split_message(message)
else:
use_cmd = False
request_id = 0
if publish:
cmd, dbn = operation.as_command(sock_info)
listeners.publish_command_start(
cmd, dbn, request_id, sock_info.address)
start = datetime.now()
try:
if send_message:
sock_info.send_message(data, max_doc_size)
reply = sock_info.receive_message(request_id)
else:
reply = sock_info.receive_message(None)
# Unpack and check for command errors.
if use_cmd:
user_fields = _CURSOR_DOC_FIELDS
legacy_response = False
else:
user_fields = None
legacy_response = True
docs = unpack_res(reply, operation.cursor_id,
operation.codec_options,
legacy_response=legacy_response,
user_fields=user_fields)
if use_cmd:
first = docs[0]
operation.client._process_response(
first, operation.session)
_check_command_response(first)
except Exception as exc:
if publish:
duration = datetime.now() - start
if isinstance(exc, (NotMasterError, OperationFailure)):
failure = exc.details
else:
failure = _convert_exception(exc)
listeners.publish_command_failure(
duration, failure, operation.name,
request_id, sock_info.address)
raise
if publish:
duration = datetime.now() - start
# Must publish in find | python | {
"resource": ""
} |
q22410 | ServerDescription.retryable_writes_supported | train | def retryable_writes_supported(self):
"""Checks if this server supports retryable writes."""
return (
| python | {
"resource": ""
} |
q22411 | _compress | train | def _compress(operation, data, ctx):
"""Takes message data, compresses it, and adds an OP_COMPRESSED header."""
compressed = ctx.compress(data)
request_id = _randint()
header = _pack_compression_header(
_COMPRESSION_HEADER_SIZE + len(compressed), # Total message length
request_id, # Request id
0, # responseTo
| python | {
"resource": ""
} |
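For context on `_pack_compression_header` in the row above: every MongoDB wire message begins with a 16-byte little-endian header of four int32s, and OP_COMPRESSED (opcode 2012) appends its own fields after it. A hedged sketch of the standard header packing; the function name here is illustrative, not the driver's:

```python
import struct

OP_COMPRESSED = 2012  # wire protocol opcode for compressed messages

def pack_header(length, request_id, response_to, op_code):
    """Pack the standard 16-byte MongoDB message header (four little-endian int32s)."""
    return struct.pack("<iiii", length, request_id, response_to, op_code)

assert len(pack_header(25, 42, 0, OP_COMPRESSED)) == 16
```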
q22412 | _insert | train | def _insert(collection_name, docs, check_keys, flags, opts):
"""Get an OP_INSERT message"""
encode = _dict_to_bson # Make local. Uses extensions.
if len(docs) == 1:
encoded = encode(docs[0], check_keys, opts)
return b"".join([
b"\x00\x00\x00\x00", # Flags don't matter for one doc.
_make_c_string(collection_name),
encoded]), len(encoded)
| python | {
"resource": ""
} |
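`_make_c_string` in the row above is the NUL-terminated string encoder used throughout these message builders. A minimal stand-in (the real helper may additionally reject strings containing embedded NUL bytes; that validation is omitted here):

```python
def make_c_string(string):
    """Encode a string as UTF-8 followed by a NUL terminator (a BSON cstring)."""
    return string.encode("utf-8") + b"\x00"

assert make_c_string("db.collection") == b"db.collection\x00"
```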
q22413 | _insert_compressed | train | def _insert_compressed(
collection_name, docs, check_keys, continue_on_error, opts, ctx):
"""Internal compressed unacknowledged insert message helper."""
op_insert, max_bson_size = _insert(
| python | {
"resource": ""
} |
q22414 | _insert_uncompressed | train | def _insert_uncompressed(collection_name, docs, check_keys,
safe, last_error_args, continue_on_error, opts):
"""Internal insert message helper."""
op_insert, max_bson_size = _insert( | python | {
"resource": ""
} |
q22415 | _update | train | def _update(collection_name, upsert, multi, spec, doc, check_keys, opts):
"""Get an OP_UPDATE message."""
flags = 0
if upsert:
flags += 1
if multi:
flags += 2
encode = _dict_to_bson | python | {
"resource": ""
} |
q22416 | _update_compressed | train | def _update_compressed(
collection_name, upsert, multi, spec, doc, check_keys, opts, ctx):
"""Internal compressed unacknowledged update message helper."""
op_update, max_bson_size = _update(
| python | {
"resource": ""
} |
q22417 | _update_uncompressed | train | def _update_uncompressed(collection_name, upsert, multi, spec,
doc, safe, last_error_args, check_keys, opts):
"""Internal update message helper."""
op_update, max_bson_size = _update( | python | {
"resource": ""
} |
q22418 | _op_msg_compressed | train | def _op_msg_compressed(flags, command, identifier, docs, check_keys, opts,
ctx):
"""Internal OP_MSG message helper."""
msg, total_size, max_bson_size = _op_msg_no_header(
flags, | python | {
"resource": ""
} |
q22419 | _op_msg_uncompressed | train | def _op_msg_uncompressed(flags, command, identifier, docs, check_keys, opts):
"""Internal compressed OP_MSG message helper."""
data, total_size, max_bson_size = _op_msg_no_header(
flags, command, identifier, docs, check_keys, opts)
| python | {
"resource": ""
} |
q22420 | _query | train | def _query(options, collection_name, num_to_skip,
num_to_return, query, field_selector, opts, check_keys):
"""Get an OP_QUERY message."""
encoded = _dict_to_bson(query, check_keys, opts)
if field_selector:
efs = _dict_to_bson(field_selector, False, opts)
else:
efs = b""
max_bson_size = max(len(encoded), | python | {
"resource": ""
} |
q22421 | _query_compressed | train | def _query_compressed(options, collection_name, num_to_skip,
num_to_return, query, field_selector,
opts, check_keys=False, ctx=None):
"""Internal compressed query message helper."""
op_query, max_bson_size = _query(
options,
collection_name,
| python | {
"resource": ""
} |
q22422 | _query_uncompressed | train | def _query_uncompressed(options, collection_name, num_to_skip,
num_to_return, query, field_selector, opts, check_keys=False):
"""Internal query message helper."""
op_query, max_bson_size = _query(
options,
collection_name,
num_to_skip,
| python | {
"resource": ""
} |
q22423 | _get_more | train | def _get_more(collection_name, num_to_return, cursor_id):
"""Get an OP_GET_MORE message."""
return b"".join([
_ZERO_32,
| python | {
"resource": ""
} |
q22424 | _get_more_compressed | train | def _get_more_compressed(collection_name, num_to_return, cursor_id, ctx):
"""Internal compressed | python | {
"resource": ""
} |
q22425 | _delete | train | def _delete(collection_name, spec, opts, flags):
"""Get an OP_DELETE message."""
encoded = _dict_to_bson(spec, False, opts) # Uses extensions.
return b"".join([
| python | {
"resource": ""
} |
q22426 | _delete_compressed | train | def _delete_compressed(collection_name, spec, opts, flags, ctx):
"""Internal compressed unacknowledged delete message helper."""
op_delete, max_bson_size = _delete(collection_name, | python | {
"resource": ""
} |
q22427 | _delete_uncompressed | train | def _delete_uncompressed(
collection_name, spec, safe, last_error_args, opts, flags=0):
"""Internal delete message helper."""
op_delete, max_bson_size = _delete(collection_name, spec, | python | {
"resource": ""
} |
q22428 | _raise_document_too_large | train | def _raise_document_too_large(operation, doc_size, max_size):
"""Internal helper for raising DocumentTooLarge."""
if operation == "insert":
raise DocumentTooLarge("BSON document too large (%d bytes)"
" - the connected server supports"
" BSON document sizes up to %d"
| python | {
"resource": ""
} |
q22429 | _do_batched_insert | train | def _do_batched_insert(collection_name, docs, check_keys,
safe, last_error_args, continue_on_error, opts,
ctx):
"""Insert `docs` using multiple batches.
"""
def _insert_message(insert_message, send_safe):
"""Build the insert message with header and GLE.
"""
request_id, final_message = __pack_message(2002, insert_message)
if send_safe:
request_id, error_message, _ = __last_error(collection_name,
last_error_args)
final_message += error_message
return request_id, final_message
send_safe = safe or not continue_on_error
last_error = None
data = StringIO()
data.write(struct.pack("<i", int(continue_on_error)))
data.write(_make_c_string(collection_name))
message_length = begin_loc = data.tell()
has_docs = False
to_send = []
encode = _dict_to_bson # Make local
compress = ctx.compress and not (safe or send_safe)
for doc in docs:
encoded = encode(doc, check_keys, opts)
encoded_length = len(encoded)
too_large = (encoded_length > ctx.max_bson_size)
message_length += encoded_length
if message_length < ctx.max_message_size and not too_large:
data.write(encoded)
to_send.append(doc)
has_docs = True
continue
if has_docs:
# We have enough data, send this message.
try:
if compress:
rid, msg = None, data.getvalue()
else:
rid, msg = _insert_message(data.getvalue(), send_safe)
ctx.legacy_bulk_insert(
rid, msg, 0, send_safe, to_send, compress)
# Exception type could be OperationFailure or a subtype
# (e.g. DuplicateKeyError)
except OperationFailure as | python | {
"resource": ""
} |
q22430 | _batched_op_msg_impl | train | def _batched_op_msg_impl(
operation, command, docs, check_keys, ack, opts, ctx, buf):
"""Create a batched OP_MSG write."""
max_bson_size = ctx.max_bson_size
max_write_batch_size = ctx.max_write_batch_size
max_message_size = ctx.max_message_size
flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00"
# Flags
buf.write(flags)
# Type 0 Section
buf.write(b"\x00")
buf.write(_dict_to_bson(command, False, opts))
# Type 1 Section
buf.write(b"\x01")
size_location = buf.tell()
# Save space for size
buf.write(b"\x00\x00\x00\x00")
try:
buf.write(_OP_MSG_MAP[operation])
except KeyError:
raise InvalidOperation('Unknown command')
if operation in (_UPDATE, _DELETE):
check_keys = False
to_send = []
idx = 0
for doc in docs:
# Encode the current operation
value = _dict_to_bson(doc, check_keys, opts)
doc_length = len(value)
new_message_size = buf.tell() + doc_length
# Does first document exceed max_message_size?
doc_too_large = (idx == 0 and (new_message_size > max_message_size))
# When OP_MSG is used unacknowledged we have to check
# document size client side or applications won't be notified.
# Otherwise we let the server deal with documents that are too large
# since ordered=False causes those documents to be skipped instead of
# | python | {
"resource": ""
} |
q22431 | _encode_batched_op_msg | train | def _encode_batched_op_msg(
operation, command, docs, check_keys, ack, opts, ctx):
"""Encode the next batched insert, update, or delete operation
as OP_MSG.
"""
| python | {
"resource": ""
} |
q22432 | _batched_op_msg_compressed | train | def _batched_op_msg_compressed(
operation, command, docs, check_keys, ack, opts, ctx):
"""Create the next batched insert, update, or delete operation
with OP_MSG, compressed.
| python | {
"resource": ""
} |
q22433 | _batched_op_msg | train | def _batched_op_msg(
operation, command, docs, check_keys, ack, opts, ctx):
"""OP_MSG implementation entry point."""
buf = StringIO()
# Save space for message length and request id
buf.write(_ZERO_64)
# responseTo, opCode
buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00")
to_send, length = _batched_op_msg_impl(
operation, command, docs, check_keys, ack, opts, ctx, buf)
# Header - request id | python | {
"resource": ""
} |
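The magic bytes written in the row above encode the header's opCode field: `\xdd\x07\x00\x00` is the little-endian int32 2013, the OP_MSG opcode (similarly, `_batched_write_command` further down writes `\xd4\x07\x00\x00`, which is 2004, OP_QUERY). A quick verification:

```python
import struct

assert struct.unpack("<i", b"\xdd\x07\x00\x00")[0] == 2013  # OP_MSG
assert struct.unpack("<i", b"\xd4\x07\x00\x00")[0] == 2004  # OP_QUERY
```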
q22434 | _do_batched_op_msg | train | def _do_batched_op_msg(
namespace, operation, command, docs, check_keys, opts, ctx):
"""Create the next batched insert, update, or delete operation
using OP_MSG.
"""
command['$db'] = namespace.split('.', 1)[0]
if 'writeConcern' in command:
ack = bool(command['writeConcern'].get('w', 1))
else:
ack = True
| python | {
"resource": ""
} |
q22435 | _batched_write_command_compressed | train | def _batched_write_command_compressed(
namespace, operation, command, docs, check_keys, opts, ctx):
"""Create the next batched insert, update, or delete command, compressed.
| python | {
"resource": ""
} |
q22436 | _encode_batched_write_command | train | def _encode_batched_write_command(
namespace, operation, command, docs, check_keys, opts, ctx):
"""Encode the next batched insert, update, or delete command.
"""
buf = StringIO()
| python | {
"resource": ""
} |
q22437 | _batched_write_command | train | def _batched_write_command(
namespace, operation, command, docs, check_keys, opts, ctx):
"""Create the next batched insert, update, or delete command.
"""
buf = StringIO()
# Save space for message length and request id
buf.write(_ZERO_64)
# responseTo, opCode
buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
# Write OP_QUERY write command
to_send, length = _batched_write_command_impl(
namespace, operation, command, docs, check_keys, | python | {
"resource": ""
} |
q22438 | _do_batched_write_command | train | def _do_batched_write_command(
namespace, operation, command, docs, check_keys, opts, ctx):
"""Batched write commands entry point."""
if ctx.sock_info.compression_context:
return _batched_write_command_compressed(
| python | {
"resource": ""
} |
q22439 | _do_bulk_write_command | train | def _do_bulk_write_command(
namespace, operation, command, docs, check_keys, opts, ctx):
"""Bulk write commands entry point."""
if ctx.sock_info.max_wire_version > 5:
return _do_batched_op_msg(
| python | {
"resource": ""
} |
q22440 | _batched_write_command_impl | train | def _batched_write_command_impl(
namespace, operation, command, docs, check_keys, opts, ctx, buf):
"""Create a batched OP_QUERY write command."""
max_bson_size = ctx.max_bson_size
max_write_batch_size = ctx.max_write_batch_size
# Max BSON object size + 16k - 2 bytes for ending NUL bytes.
# Server guarantees there is enough room: SERVER-10643.
max_cmd_size = max_bson_size + _COMMAND_OVERHEAD
# No options
buf.write(_ZERO_32)
# Namespace as C string
buf.write(b(namespace))
buf.write(_ZERO_8)
# Skip: 0, Limit: -1
buf.write(_SKIPLIM)
# Where to write command document length
command_start = buf.tell()
buf.write(bson.BSON.encode(command))
# Start of payload
buf.seek(-1, 2)
# Work around some Jython weirdness.
buf.truncate()
try:
buf.write(_OP_MAP[operation])
except KeyError:
raise InvalidOperation('Unknown command')
if operation in (_UPDATE, _DELETE):
check_keys = False
# Where to write list document length
list_start = buf.tell() - 4
to_send = []
idx = 0
for doc in docs:
# Encode the current operation
key = b(str(idx))
value = bson.BSON.encode(doc, check_keys, opts)
# Is there enough room to add this document? max_cmd_size accounts for
# the two trailing null bytes.
enough_data = | python | {
"resource": ""
} |
q22441 | _OpReply.raw_response | train | def raw_response(self, cursor_id=None):
"""Check the response header from the database, without decoding BSON.
Check the response for errors and unpack.
Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or
OperationFailure.
:Parameters:
- `cursor_id` (optional): cursor_id we sent to get this response -
used for raising an informative exception when we get cursor id not
valid at server response.
"""
if self.flags & 1:
# Shouldn't get this response if we aren't doing a getMore
if cursor_id is None:
raise ProtocolError("No cursor id for getMore operation")
# Fake a getMore command response. OP_GET_MORE provides no
# document.
msg = "Cursor not found, cursor id: %d" % (cursor_id,)
errobj = {"ok": 0, "errmsg": msg, "code": 43}
raise CursorNotFound(msg, 43, errobj)
elif self.flags & 2:
error_object = bson.BSON(self.documents).decode()
# Fake the ok field if it doesn't exist.
error_object.setdefault("ok", 0)
| python | {
"resource": ""
} |
q22442 | _OpReply.unpack | train | def unpack(cls, msg):
"""Construct an _OpReply from raw bytes."""
# PYTHON-945: ignore starting_from field.
flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg)
| python | {
"resource": ""
} |
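`cls.UNPACK_FROM` in the row above reads the fixed OP_REPLY preamble: int32 responseFlags, int64 cursorID, int32 startingFrom, int32 numberReturned. A standalone sketch of that layout; the struct format string is an assumption consistent with those field widths:

```python
import struct

# flags (int32), cursorID (int64), startingFrom (int32), numberReturned (int32)
OP_REPLY_PREAMBLE = struct.Struct("<iqii")

def parse_reply_preamble(body):
    """Decode the 20-byte OP_REPLY preamble that precedes the BSON documents."""
    flags, cursor_id, starting_from, number_returned = (
        OP_REPLY_PREAMBLE.unpack_from(body))
    return flags, cursor_id, number_returned  # starting_from ignored (PYTHON-945)

sample = struct.pack("<iqii", 0, 12345, 0, 1)
assert parse_reply_preamble(sample) == (0, 12345, 1)
```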
q22443 | _OpMsg.unpack_response | train | def unpack_response(self, cursor_id=None,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
user_fields=None, legacy_response=False):
"""Unpack a OP_MSG command response.
:Parameters:
- `cursor_id` (optional): Ignored, for compatibility with _OpReply.
- `codec_options` (optional): an instance of
:class:`~bson.codec_options.CodecOptions`
| python | {
"resource": ""
} |
q22444 | _OpMsg.unpack | train | def unpack(cls, msg):
"""Construct an _OpMsg from raw bytes."""
flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg)
if flags != 0:
raise ProtocolError("Unsupported OP_MSG flags (%r)" % (flags,))
if first_payload_type != 0:
raise ProtocolError(
| python | {
"resource": ""
} |
q22445 | TopologyDescription.has_known_servers | train | def has_known_servers(self):
"""Whether there are any Servers of types besides Unknown."""
| python | {
"resource": ""
} |
q22446 | MongoClient._cache_index | train | def _cache_index(self, dbname, collection, index, cache_for):
"""Add an index to the index cache for ensure_index operations."""
now = datetime.datetime.utcnow()
expire = datetime.timedelta(seconds=cache_for) + now
with self.__index_cache_lock:
if dbname not in self.__index_cache:
self.__index_cache[dbname] = {}
self.__index_cache[dbname][collection] = {}
self.__index_cache[dbname][collection][index] = expire
elif collection not | python | {
"resource": ""
} |
q22447 | MongoClient.close | train | def close(self):
"""Cleanup client resources and disconnect from MongoDB.
On MongoDB >= 3.6, end all server sessions created by this client by
sending one or more endSessions commands.
Close all sockets in the connection pools and stop the monitor threads.
If this instance is used again it will be automatically re-opened and
the threads restarted.
.. versionchanged:: 3.6
End all server sessions created by this client.
"""
session_ids = self._topology.pop_all_sessions()
if session_ids: | python | {
"resource": ""
} |
q22448 | MongoClient._select_server | train | def _select_server(self, server_selector, session, address=None):
"""Select a server to run an operation on this client.
:Parameters:
- `server_selector`: The server selector to use if the session is
not pinned and no address is given.
- `session`: The ClientSession for the next operation, or None. May
be pinned to a mongos server address.
- `address` (optional): Address when sending a message
to a specific server, used for getMore.
"""
try:
topology = self._get_topology()
address = address or (session and session._pinned_address)
if address:
# We're running a getMore or this session is pinned to a mongos.
server = topology.select_server_by_address(address)
if not server:
raise AutoReconnect('server %s:%d no longer available'
| python | {
"resource": ""
} |
q22449 | MongoClient._reset_on_error | train | def _reset_on_error(self, server_address, session):
"""On "not master" or "node is recovering" errors reset the server
according to the SDAM spec.
Unpin the session on transient transaction errors.
"""
try:
try:
yield
except PyMongoError as exc:
if session and exc.has_error_label(
"TransientTransactionError"):
session._unpin_mongos()
raise
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
# Server Discovery And Monitoring Spec: "When an application
# operation fails because of any network error besides a socket
# timeout...."
raise
except NotMasterError:
# "When the client sees a "not master" error it MUST replace the
# server's description with type Unknown. It MUST request an
# immediate check of the server."
self._reset_server_and_request_check(server_address)
| python | {
"resource": ""
} |
q22450 | MongoClient._retryable_write | train | def _retryable_write(self, retryable, func, session):
"""Internal retryable write helper."""
with self._tmp_session(session) as s:
| python | {
"resource": ""
} |
q22451 | MongoClient.close_cursor | train | def close_cursor(self, cursor_id, address=None):
"""DEPRECATED - Send a kill cursors message soon with the given id.
Raises :class:`TypeError` if `cursor_id` is not an instance of
``(int, long)``. What closing the cursor actually means
depends on this client's cursor manager.
This method may be called from a :class:`~pymongo.cursor.Cursor`
destructor during garbage collection, so it isn't safe to take a
lock or do network I/O. Instead, we schedule the cursor to be closed
| python | {
"resource": ""
} |
q22452 | MongoClient._kill_cursors | train | def _kill_cursors(self, cursor_ids, address, topology, session):
"""Send a kill cursors message with the given ids."""
listeners = self._event_listeners
publish = listeners.enabled_for_commands
if address:
# address could be a tuple or _CursorAddress, but
# select_server_by_address needs (host, port).
server = topology.select_server_by_address(tuple(address))
else:
# Application called close_cursor() with no address.
server = topology.select_server(writable_server_selector)
try:
namespace = address.namespace
db, coll = namespace.split('.', 1)
except AttributeError:
namespace = None
db = coll = "OP_KILL_CURSORS"
spec = SON([('killCursors', coll), ('cursors', cursor_ids)])
with server.get_socket(self.__all_credentials) as sock_info:
if sock_info.max_wire_version >= 4 and namespace is not None:
sock_info.command(db, spec, session=session, client=self)
else:
if publish:
start = datetime.datetime.now()
request_id, msg = message.kill_cursors(cursor_ids)
if publish:
duration = datetime.datetime.now() - start
# Here and below, address could be a tuple or
# _CursorAddress. We always want to publish a
# tuple to match the rest of the monitoring
# API.
listeners.publish_command_start(
spec, db, request_id, tuple(address))
| python | {
"resource": ""
} |
q22453 | MongoClient._process_periodic_tasks | train | def _process_periodic_tasks(self):
"""Process any pending kill cursors requests and
maintain connection pool parameters."""
address_to_cursor_ids = defaultdict(list)
# Other threads or the GC may append to the queue concurrently.
while True:
try:
address, cursor_ids = self.__kill_cursors_queue.pop()
except IndexError:
break
address_to_cursor_ids[address].extend(cursor_ids)
# Don't re-open topology if it's closed and there's no pending cursors.
if address_to_cursor_ids:
topology = self._get_topology()
for | python | {
"resource": ""
} |
q22454 | MongoClient.start_session | train | def start_session(self,
causal_consistency=True,
default_transaction_options=None):
"""Start a logical session.
This method takes the same parameters as
:class:`~pymongo.client_session.SessionOptions`. See the
:mod:`~pymongo.client_session` module for details and examples.
Requires MongoDB 3.6. It is an error to call :meth:`start_session`
| python | {
"resource": ""
} |
q22455 | MongoClient.server_info | train | def server_info(self, session=None):
"""Get information about the MongoDB server we're connected to.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
| python | {
"resource": ""
} |
q22456 | MongoClient.list_databases | train | def list_databases(self, session=None, **kwargs):
"""Get a cursor over the databases of the connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): Optional parameters of the
`listDatabases command
<https://docs.mongodb.com/manual/reference/command/listDatabases/>`_
can be passed as keyword arguments to this method. The supported
options differ by server version.
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionadded:: 3.6
"""
| python | {
"resource": ""
} |
q22457 | MongoClient.list_database_names | train | def list_database_names(self, session=None):
"""Get a list of the names of all databases on the connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`. | python | {
"resource": ""
} |
q22458 | MongoClient.get_default_database | train | def get_default_database(self, default=None, codec_options=None,
read_preference=None, write_concern=None, read_concern=None):
"""Get the database named in the MongoDB connection URI.
>>> uri = 'mongodb://host/my_database'
>>> client = MongoClient(uri)
>>> db = client.get_default_database()
>>> assert db.name == 'my_database'
>>> db = client.get_database()
>>> assert db.name == 'my_database'
Useful in scripts where you want to choose which database to use
based only on the URI in a configuration file.
:Parameters:
- `default` (optional): the database name to use if no database name
was provided in the URI.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`MongoClient` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`MongoClient` is
| python | {
"resource": ""
} |
q22459 | MongoClient._database_default_options | train | def _database_default_options(self, name):
"""Get a Database instance with the default settings."""
return self.get_database(
name, codec_options=DEFAULT_CODEC_OPTIONS,
| python | {
"resource": ""
} |
q22460 | _handle_option_deprecations | train | def _handle_option_deprecations(options):
"""Issue appropriate warnings when deprecated options are present in the
options dictionary. Removes deprecated option key, value pairs if the
options dictionary is found to also have the renamed option."""
undeprecated_options = _CaseInsensitiveDictionary()
for key, value in iteritems(options):
optname = str(key).lower()
if optname in URI_OPTIONS_DEPRECATION_MAP:
renamed_key = URI_OPTIONS_DEPRECATION_MAP[optname]
if renamed_key.lower() in options:
warnings.warn("Deprecated option '%s' ignored in favor of "
| python | {
"resource": ""
} |
q22461 | _normalize_options | train | def _normalize_options(options):
"""Renames keys in the options dictionary to their internally-used
names."""
normalized_options = {}
for key, value in iteritems(options):
optname = str(key).lower() | python | {
"resource": ""
} |
q22462 | split_options | train | def split_options(opts, validate=True, warn=False, normalize=True):
"""Takes the options portion of a MongoDB URI, validates each option
and returns the options in a dictionary.
:Parameters:
- `opts`: A string representing MongoDB URI options.
- `validate`: If ``True`` (the default), validate and normalize all
options.
- `warn`: If ``False`` (the default), suppress all warnings raised
during validation of options.
- `normalize`: If ``True`` (the default), renames all options to their
| python | {
"resource": ""
} |
q22463 | GridFS.new_file | train | def new_file(self, **kwargs):
"""Create a new file in GridFS.
Returns a new :class:`~gridfs.grid_file.GridIn` instance to
which data can be written. Any keyword arguments will be
passed through to :meth:`~gridfs.grid_file.GridIn`.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
| python | {
"resource": ""
} |
q22464 | GridFS.get_version | train | def get_version(self, filename=None, version=-1, session=None, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `session` (optional): a
| python | {
"resource": ""
} |
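A hypothetical usage sketch of the version numbering described above; the client, database name, and filename are illustrative only, and assume three versions of the file were previously uploaded:

```python
import gridfs
from pymongo import MongoClient

fs = gridfs.GridFS(MongoClient().test)

# With three uploads of "report.csv", these pairs name the same versions:
first = fs.get_version("report.csv", version=0)    # same file as version=-3
second = fs.get_version("report.csv", version=1)   # same file as version=-2
latest = fs.get_version("report.csv", version=-1)  # same file as version=2
```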
q22465 | GridFS.get_last_version | train | def get_last_version(self, filename=None, session=None, **kwargs):
"""Get the most recent version of a file in GridFS by ``"filename"``
or metadata fields.
Equivalent to calling :meth:`get_version` with the default
`version` (``-1``).
| python | {
"resource": ""
} |
q22466 | GridFSBucket.upload_from_stream | train | def upload_from_stream(self, filename, source, chunk_size_bytes=None,
metadata=None, session=None):
"""Uploads a user file to a GridFS bucket.
Reads the contents of the user file from `source` and uploads
it to the file `filename`. Source can be a string or file-like object.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
file_id = fs.upload_from_stream(
"test_file",
"data I want to store!",
chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
Returns the _id of the uploaded file.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `filename`: The name of the file to upload.
- `source`: The source stream of the content to be uploaded. Must be
a file-like object that implements :meth:`read` or a string.
- `chunk_size_bytes` (options): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
| python | {
"resource": ""
} |
q22467 | GridFSBucket.rename | train | def rename(self, file_id, new_filename, session=None):
"""Renames the stored file with the specified file_id.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to rename
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.rename(file_id, "new_test_name")
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be renamed.
- `new_filename`: The new name of the file.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` | python | {
"resource": ""
} |
q22468 | Database.with_options | train | def with_options(self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this database changing the specified settings.
>>> db1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> db2 = db1.with_options(read_preference=ReadPreference.SECONDARY)
>>> db1.read_preference
Primary()
>>> db2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is | python | {
"resource": ""
} |
q22469 | Database.watch | train | def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this database.
Performs an aggregation with an implicit initial ``$changeStream``
stage and returns a
:class:`~pymongo.change_stream.DatabaseChangeStream` cursor which
iterates over changes on all collections in this database.
Introduced in MongoDB 4.0.
.. code-block:: python
with db.watch() as stream:
for change in stream:
print(change)
The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable
blocks until the next change document is returned or an error is
raised. If the
:meth:`~pymongo.change_stream.DatabaseChangeStream.next` method
encounters a network error when retrieving a batch from the server,
it will automatically attempt to recreate the cursor such that no
change events are missed. Any error encountered during the resume
attempt indicates there may be an outage and will be raised.
.. code-block:: python
try:
with db.watch(
[{'$match': {'operationType': 'insert'}}]) as stream:
for insert_change in stream:
print(insert_change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument to pass as an option
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
| python | {
"resource": ""
} |
q22470 | Database._retryable_read_command | train | def _retryable_read_command(self, command, value=1, check=True,
allowable_errors=None, read_preference=None,
codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs):
"""Same as command but used for retryable read commands."""
if read_preference is None:
read_preference = ((session and session._txn_read_preference()) | python | {
"resource": ""
} |
q22471 | Database.list_collections | train | def list_collections(self, session=None, filter=None, **kwargs):
"""Get a cursor over the collectons of this database.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `filter` (optional): A query document to filter the list of
collections returned from the listCollections command.
- `**kwargs` (optional): Optional parameters of the
`listCollections command
<https://docs.mongodb.com/manual/reference/command/listCollections/>`_
can be passed as keyword arguments to this method. The supported
options differ by server version.
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
| python | {
"resource": ""
} |
q22472 | Database.validate_collection | train | def validate_collection(self, name_or_collection,
scandata=False, full=False, session=None):
"""Validate a collection.
Returns a dict of validation info. Raises CollectionInvalid if
validation fails.
:Parameters:
- `name_or_collection`: A Collection object or the name of a
collection to validate.
- `scandata`: Do extra checks beyond checking the overall
structure of the collection.
- `full`: Have the server do a more thorough scan of the
collection. Use with `scandata` for a thorough scan
of the structure of the collection and the individual
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, string_type):
raise TypeError("name_or_collection must be an instance of "
"%s or Collection" % (string_type.__name__,))
result = self.command("validate", _unicode(name),
scandata=scandata, full=full, session=session)
valid = True
| python | {
"resource": ""
} |
q22473 | Database.profiling_level | train | def profiling_level(self, session=None):
"""Get the database's current profiling level.
Returns one of (:data:`~pymongo.OFF`,
:data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`).
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
| python | {
"resource": ""
} |
q22474 | Database.set_profiling_level | train | def set_profiling_level(self, level, slow_ms=None, session=None):
"""Set the database's profiling level.
:Parameters:
- `level`: Specifies a profiling level, see list of possible values
below.
- `slow_ms`: Optionally modify the threshold for the profile to
consider a query or operation. Even if the profiler is off, queries
slower than the `slow_ms` threshold will get written to the logs.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
Possible `level` values:
+----------------------------+------------------------------------+
| Level | Setting |
+============================+====================================+
| python | {
"resource": ""
} |
q22475 | CommandCursor._try_next | train | def _try_next(self, get_more_allowed):
"""Advance the cursor blocking for at most one getMore command."""
if not len(self.__data) and not self.__killed and get_more_allowed:
self._refresh()
if len(self.__data):
| python | {
"resource": ""
} |
q22476 | ObjectId._random | train | def _random(cls):
"""Generate a 5-byte random number once per process.
"""
| python | {
"resource": ""
} |
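The body of `_random` above is truncated. Given the "5-byte random number once per process" docstring, a plausible completion caches `os.urandom(5)` on the class; the cache attribute name below is an assumption, and a fuller version would also re-seed when the process id changes (fork safety):

```python
import os

class _ObjectIdLike(object):
    _random_bytes = None  # hypothetical per-process cache

    @classmethod
    def _random(cls):
        """Generate (and cache) a 5-byte random value once per process."""
        if cls._random_bytes is None:
            cls._random_bytes = os.urandom(5)
        return cls._random_bytes

assert len(_ObjectIdLike._random()) == 5
```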
q22477 | SocketInfo.command | train | def command(self, dbname, spec, slave_ok=False,
read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, check=True,
allowable_errors=None, check_keys=False,
read_concern=None,
write_concern=None,
parse_write_concern_error=False,
collation=None,
session=None,
client=None,
retryable_write=False,
publish_events=True,
user_fields=None):
"""Execute a command or raise an error.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
- `session`: optional ClientSession instance.
- `client`: optional MongoClient for gossipping $clusterTime.
- `retryable_write`: True if this command is a retryable write.
- `publish_events`: Should we publish events for this command?
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
"""
self.validate_session(client, session)
session = _validate_session_write_concern(session, write_concern)
# Ensure command name remains in first place.
if not isinstance(spec, ORDERED_TYPES):
spec = SON(spec)
if (read_concern and self.max_wire_version < 4
and not read_concern.ok_for_legacy):
raise ConfigurationError(
'read concern level of %s is not valid '
'with a max wire version of %d.'
% (read_concern.level, self.max_wire_version))
if not (write_concern is None or write_concern.acknowledged or
collation is None):
raise ConfigurationError(
| python | {
"resource": ""
} |
q22478 | SocketInfo.validate_session | train | def validate_session(self, client, session):
"""Validate this session before use with client.
Raises error if this session is logged in as a different user or
the client is not the one that created the session.
"""
if session:
if session._client is not client:
raise InvalidOperation(
'Can only use session with the MongoClient that'
| python | {
"resource": ""
} |
q22479 | SocketInfo.send_cluster_time | train | def send_cluster_time(self, command, session, client):
"""Add cluster time for MongoDB | python | {
"resource": ""
} |
q22480 | Pool.remove_stale_sockets | train | def remove_stale_sockets(self):
"""Removes stale sockets then adds new ones if pool is too small."""
if self.opts.max_idle_time_seconds is not None:
with self.lock:
while (self.sockets and
self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds):
sock_info = self.sockets.pop()
sock_info.close()
| python | {
"resource": ""
} |
q22481 | SocketChecker.socket_closed | train | def socket_closed(self, sock):
"""Return True if we know socket has been closed, False otherwise.
"""
while True:
try:
if self._poller:
with self._lock:
self._poller.register(sock, _EVENT_MASK)
try:
rd = self._poller.poll(0)
finally:
self._poller.unregister(sock)
else:
rd, _, _ = select.select([sock], [], [], 0)
except (RuntimeError, KeyError):
# RuntimeError is raised during a concurrent poll. KeyError
# is raised by unregister if the socket is not in the poller.
# These errors should not be possible since we protect the
# poller with a mutex.
raise
except ValueError:
# ValueError is raised by register/unregister/select if the
| python | {
"resource": ""
} |
q22482 | _get_object_size | train | def _get_object_size(data, position, obj_end):
"""Validate and return a BSON document's size."""
try:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
except struct.error as exc:
raise InvalidBSON(str(exc))
end = position + obj_size - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
| python | {
"resource": ""
} |
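`_UNPACK_INT` in the row above is the conventional int32 prefix reader: every BSON document starts with its total byte length as a little-endian int32 and ends with a NUL byte, which is what the `b"\x00"` comparison checks. A standalone illustration:

```python
import struct

UNPACK_INT = struct.Struct("<i").unpack

# Smallest valid BSON document: 5 bytes (4-byte length prefix + NUL terminator).
empty_doc = b"\x05\x00\x00\x00\x00"
obj_size = UNPACK_INT(empty_doc[0:4])[0]
assert obj_size == 5
assert empty_doc[obj_size - 1:obj_size] == b"\x00"  # the "bad eoo" check passes
```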
q22483 | _elements_to_dict | train | def _elements_to_dict(data, position, obj_end, opts, result=None):
"""Decode a BSON document into result."""
if result is None:
result = opts.document_class()
| python | {
"resource": ""
} |
q22484 | _datetime_to_millis | train | def _datetime_to_millis(dtm):
"""Convert datetime to milliseconds since epoch UTC."""
if dtm.utcoffset() is not None:
dtm = dtm - dtm.utcoffset()
| python | {
"resource": ""
} |
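A plausible completion of the conversion above using only the stdlib; the `calendar.timegm` arithmetic is an assumption consistent with the "milliseconds since epoch UTC" docstring:

```python
import calendar
import datetime

def datetime_to_millis(dtm):
    """Convert a datetime to milliseconds since the Unix epoch, UTC."""
    if dtm.utcoffset() is not None:
        dtm = dtm - dtm.utcoffset()  # shift timezone-aware datetimes to UTC
    return int(calendar.timegm(dtm.timetuple()) * 1000
               + dtm.microsecond // 1000)

assert datetime_to_millis(datetime.datetime(1970, 1, 1, 0, 0, 1, 500000)) == 1500
```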
q22485 | _decode_all_selective | train | def _decode_all_selective(data, codec_options, fields):
"""Decode BSON data to a single document while using user-provided
custom decoding logic.
`data` must be a string representing a valid, BSON-encoded document.
:Parameters:
- `data`: BSON data
- `codec_options`: An instance of
:class:`~bson.codec_options.CodecOptions` with user-specified type
decoders. If no decoders are found, this method is the same as
``decode_all``.
- `fields`: Map of document namespaces where data that needs
to be custom decoded lives or None. For example, to custom decode a
list of objects in 'field1.subfield1', the specified value should be
``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or
None, this method is the same as ``decode_all``.
:Returns:
- `document_list`: | python | {
"resource": ""
} |
q22486 | _validate_session_write_concern | train | def _validate_session_write_concern(session, write_concern):
"""Validate that an explicit session is not used with an unack'ed write.
Returns the session to use for the next operation.
"""
if session:
if write_concern is not None and not write_concern.acknowledged:
# For unacknowledged writes without an explicit session,
# drivers SHOULD NOT use an implicit session. If a driver
# creates an implicit session for unacknowledged writes
# without an explicit session, the driver MUST NOT send the
| python | {
"resource": ""
} |
q22487 | ClientSession._inherit_option | train | def _inherit_option(self, name, val):
"""Return the inherited TransactionOption value."""
if val:
return val
txn_opts = self.options.default_transaction_options
| python | {
"resource": ""
} |
q22488 | ClientSession.with_transaction | train | def with_transaction(self, callback, read_concern=None, write_concern=None,
read_preference=None):
"""Execute a callback in a transaction.
This method starts a transaction on this session, executes ``callback``
once, and then commits the transaction. For example::
def callback(session):
orders = session.client.db.orders
inventory = session.client.db.inventory
orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}},
{"$inc": {"qty": -100}}, session=session)
with client.start_session() as session:
session.with_transaction(callback)
To pass arbitrary arguments to the ``callback``, wrap your callable
with a ``lambda`` like this::
def callback(session, custom_arg, custom_kwarg=None):
# Transaction operations...
with client.start_session() as session:
session.with_transaction(
lambda s: callback(s, "custom_arg", custom_kwarg=1))
In the event of an exception, ``with_transaction`` may retry the commit
or the entire transaction, therefore ``callback`` may be invoked
multiple times by a single call to ``with_transaction``. Developers
should be mindful of this possibility when writing a ``callback`` that
modifies application state or has any other side-effects.
Note that even when the ``callback`` is invoked multiple times,
``with_transaction`` ensures that the transaction will be committed
at-most-once on the server.
The ``callback`` should not attempt to start new transactions, but
should simply run operations meant to be contained within a
transaction. The ``callback`` should also not commit the transaction;
this is handled automatically by ``with_transaction``. If the
``callback`` does commit or abort the transaction without error,
however, ``with_transaction`` will return without taking further
action.
When ``callback`` raises an exception, ``with_transaction``
automatically aborts the current transaction. When ``callback`` or
:meth:`~ClientSession.commit_transaction` raises an exception that
includes the ``"TransientTransactionError"`` error label,
``with_transaction`` starts a new transaction and re-executes
the ``callback``.
When :meth:`~ClientSession.commit_transaction` raises an exception with
the ``"UnknownTransactionCommitResult"`` error label,
``with_transaction`` retries the commit until the result of the
transaction is known.
This method will cease retrying after 120 seconds has elapsed. This
timeout is not configurable and any exception raised by the
``callback`` or by :meth:`ClientSession.commit_transaction` after the
timeout is reached will be re-raised. Applications that desire a
different timeout duration should not use this method.
:Parameters:
- `callback`: The callable ``callback`` to run inside a transaction.
The callable must accept a single argument, this session. Note,
under certain error conditions the callback may be run multiple
| python | {
"resource": ""
} |
q22489 | ClientSession.commit_transaction | train | def commit_transaction(self):
"""Commit a multi-statement transaction.
.. versionadded:: 3.7
"""
self._check_ended()
retry = False
state = self._transaction.state
if state is _TxnState.NONE:
raise InvalidOperation("No transaction started")
elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
# Server transaction was never started, no need to send a command.
self._transaction.state = _TxnState.COMMITTED_EMPTY
return
elif state is _TxnState.ABORTED:
raise InvalidOperation(
"Cannot call commitTransaction after calling abortTransaction")
elif state is _TxnState.COMMITTED:
# We're explicitly retrying the commit, move the state back to
# "in progress" so that _in_transaction returns true.
self._transaction.state = _TxnState.IN_PROGRESS
retry = True
try:
self._finish_transaction_with_retry("commitTransaction", retry)
except ConnectionFailure as exc:
# We do not know if the commit was successfully applied on the
# server or if it satisfied the provided write concern, set the
# unknown commit error label.
| python | {
"resource": ""
} |
q22490 | ClientSession.abort_transaction | train | def abort_transaction(self):
"""Abort a multi-statement transaction.
.. versionadded:: 3.7
"""
self._check_ended()
state = self._transaction.state
if state is _TxnState.NONE:
raise InvalidOperation("No transaction started")
elif state is _TxnState.STARTING:
# Server transaction was never started, no need to send a command.
self._transaction.state = _TxnState.ABORTED
return
elif state is _TxnState.ABORTED:
raise InvalidOperation("Cannot call abortTransaction twice")
elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY):
raise InvalidOperation(
| python | {
"resource": ""
} |
q22491 | ClientSession._finish_transaction_with_retry | train | def _finish_transaction_with_retry(self, command_name, explicit_retry):
"""Run commit or abort with one retry after any retryable error.
:Parameters:
- `command_name`: Either "commitTransaction" or "abortTransaction".
- `explicit_retry`: True when this is an explicit commit retry attempt,
i.e. the application called session.commit_transaction() twice.
"""
# This can be refactored with MongoClient._retry_with_session.
try:
return self._finish_transaction(command_name, explicit_retry)
except ServerSelectionTimeoutError:
raise
except ConnectionFailure as exc:
try:
return self._finish_transaction(command_name, True)
except ServerSelectionTimeoutError:
# Raise the original error so the application can infer that
# an attempt was made.
| python | {
"resource": ""
} |
q22492 | ClientSession._advance_cluster_time | train | def _advance_cluster_time(self, cluster_time):
"""Internal cluster time helper."""
if self._cluster_time is None:
| python | {
"resource": ""
} |
q22493 | ClientSession.advance_cluster_time | train | def advance_cluster_time(self, cluster_time):
"""Update the cluster time for this session.
:Parameters:
- `cluster_time`: The
:data:`~pymongo.client_session.ClientSession.cluster_time` from
another `ClientSession` instance.
"""
if not isinstance(cluster_time, abc.Mapping):
raise | python | {
"resource": ""
} |
q22494 | ClientSession._advance_operation_time | train | def _advance_operation_time(self, operation_time):
"""Internal operation time helper."""
if self._operation_time is None:
| python | {
"resource": ""
} |
q22495 | ClientSession.advance_operation_time | train | def advance_operation_time(self, operation_time):
"""Update the operation time for this session.
:Parameters:
- `operation_time`: The
:data:`~pymongo.client_session.ClientSession.operation_time` from
another `ClientSession` instance.
"""
if not isinstance(operation_time, | python | {
"resource": ""
} |
q22496 | ClientSession._process_response | train | def _process_response(self, reply):
"""Process a response to a command that was run with this session."""
self._advance_cluster_time(reply.get('$clusterTime'))
self._advance_operation_time(reply.get('operationTime'))
if self._in_transaction and self._transaction.sharded:
| python | {
"resource": ""
} |
q22497 | ClientSession._pin_mongos | train | def _pin_mongos(self, server):
"""Pin this session to the given mongos Server."""
self._transaction.sharded = True
| python | {
"resource": ""
} |
q22498 | _parse_write_concern | train | def _parse_write_concern(options):
"""Parse write concern options."""
concern = options.get('w')
wtimeout = options.get('wtimeoutms')
j = options.get('journal')
| python | {
"resource": ""
} |
q22499 | _parse_ssl_options | train | def _parse_ssl_options(options):
"""Parse ssl options."""
use_ssl = options.get('ssl')
if use_ssl is not None:
validate_boolean('ssl', use_ssl)
certfile = options.get('ssl_certfile')
keyfile = options.get('ssl_keyfile')
passphrase = options.get('ssl_pem_passphrase')
ca_certs = options.get('ssl_ca_certs')
cert_reqs = options.get('ssl_cert_reqs')
match_hostname = options.get('ssl_match_hostname', True)
crlfile = options.get('ssl_crlfile')
ssl_kwarg_keys = [k for k in options
if k.startswith('ssl_') and options[k]]
if use_ssl == False and ssl_kwarg_keys:
raise ConfigurationError("ssl has not been enabled but the "
"following ssl parameters have been set: "
"%s. Please set `ssl=True` or remove."
| python | {
"resource": ""
} |