func_code_string stringlengths 52 1.94M | func_documentation_string stringlengths 1 47.2k |
|---|---|
def _process_credentials(self, req, resp, origin):
if self._cors_config['allow_credentials_all_origins']:
self._set_allow_credentials(resp)
return True
if origin in self._cors_config['allow_credentials_origins_list']:
self._set_allow_credentials(resp)
... | Adds the Access-Control-Allow-Credentials to the response
if the cors settings indicates it should be set. |
def send_messages(self, email_messages):
    """Send one or more EmailMessage objects.

    Args:
        email_messages: A list of Django EmailMessage objects.

    Returns:
        An integer count of the messages sent.
    """
    # Return 0 rather than None for an empty batch so the result is
    # always a count, as the docstring promises (matches Django's
    # built-in backends).
    if not email_messages:
        return 0
    sent_message_count = 0
    for email_message in email_messages:
        if self._send(email_message):
            sent_message_count += 1
    return sent_message_count
def _send(self, email_message):
pre_send.send(self.__class__, message=email_message)
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email,
email_message.encoding)
recipients = [sa... | Sends an individual message via the Amazon SES HTTP API.
Args:
email_message: A single Django EmailMessage object.
Returns:
True if the EmailMessage was sent successfully, otherwise False.
Raises:
ClientError: An interaction with the Amazon SES HTTP API
... |
def serve(request, path, document_root=None, show_indexes=False, default=''):
# Clean up given path to only allow serving files below document_root.
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Stri... | Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve',
{'document_root' : '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_inde... |
def was_modified_since(header=None, mtime=0, size=0):
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches... | Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about. |
def get_url(self, obj):
if not hasattr(obj, 'get_absolute_url') or not obj.get_absolute_url():
raise ImproperlyConfigured("No URL configured. You must either \
set a ``get_absolute_url`` method on the %s model or override the %s view's \
``get_url`` method" % (obj.__class__.__name__, self._... | The URL at which the detail page should appear. |
def unbuild_object(self, obj):
    """Delete the built directory for ``obj``.

    Removes the parent directory of self.get_build_path(obj) if it exists.
    """
    logger.debug("Unbuilding %s" % obj)
    build_dir = os.path.split(self.get_build_path(obj))[0]
    if not self.fs.exists(build_dir):
        return
    logger.debug("Removing {}".format(build_dir))
    self.fs.removetree(build_dir)
def unpublish_object(content_type_pk, obj_pk):
ct = ContentType.objects.get_for_id(content_type_pk)
obj = ct.get_object_for_this_type(pk=obj_pk)
try:
# Unbuild the object
logger.info("unpublish_object task has received %s" % obj)
obj.unbuild()
# Run the `publish` managem... | Unbuild all views related to a object and then sync to S3.
Accepts primary keys to retrieve a model object that
inherits bakery's BuildableModel class. |
def prep_directory(self, target_dir):
dirname = path.dirname(target_dir)
if dirname:
dirname = path.join(settings.BUILD_DIR, dirname)
if not self.fs.exists(dirname):
logger.debug("Creating directory at {}{}".format(self.fs_name, dirname))
... | Prepares a new directory to store the file at the provided path, if needed. |
def write_file(self, target_path, html):
    """Write the provided HTML out to the provided path on self.fs."""
    logger.debug("Building to {}{}".format(self.fs_name, target_path))
    # The ``with`` block closes the file; the original's explicit
    # ``outfile.close()`` inside the block was redundant and is removed.
    with self.fs.open(smart_text(target_path), 'wb') as outfile:
        outfile.write(six.binary_type(html))
def is_gzippable(self, path):
# First check if gzipping is allowed by the global setting
if not getattr(settings, 'BAKERY_GZIP', False):
return False
# Then check if the content type of this particular file is gzippable
whitelist = getattr(
settings,
... | Returns a boolean indicating if the provided file path is a candidate
for gzipping. |
def gzip_file(self, target_path, html):
logger.debug("Gzipping to {}{}".format(self.fs_name, target_path))
# Write GZIP data to an in-memory buffer
data_buffer = six.BytesIO()
kwargs = dict(
filename=path.basename(target_path),
mode='wb',
file... | Zips up the provided HTML as a companion for the provided path.
Intended to take advantage of the peculiarities of
Amazon S3's GZIP service.
mtime, an option that writes a timestamp to the output file
is set to 0, to avoid having s3cmd do unnecessary uploads because
of differen... |
def get_redirect_url(self, *args, **kwargs):
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
... | Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method. |
def build(self):
    """Run build_object(self) against every view in self.detail_views,
    then call _build_extra() and _build_related()."""
    for view_str in self.detail_views:
        view_class = self._get_view(view_str)
        view_class().build_object(self)
    self._build_extra()
    self._build_related()
def unbuild(self):
    """Run unbuild_object(self) against every view in self.detail_views,
    then call _unbuild_extra() and _build_related()."""
    for view_str in self.detail_views:
        view_class = self._get_view(view_str)
        view_class().unbuild_object(self)
    self._unbuild_extra()
    # Rebuild related pages (RSS etc.) so the object disappears from them.
    self._build_related()
def save(self, *args, **kwargs):
from bakery import tasks
from django.contrib.contenttypes.models import ContentType
# if obj.save(publish=False) has been passed, we skip everything.
if not kwargs.pop('publish', True):
super(AutoPublishingBuildableModel, self).save(*... | A custom save that publishes or unpublishes the object where
appropriate.
Save with keyword argument obj.save(publish=False) to skip the process. |
def delete(self, *args, **kwargs):
from bakery import tasks
from django.contrib.contenttypes.models import ContentType
# if obj.save(unpublish=False) has been passed, we skip the task.
unpublish = kwargs.pop('unpublish', True)
# Delete it from the database
super(... | Triggers a task that will unpublish the object after it is deleted.
Save with keyword argument obj.delete(unpublish=False) to skip it. |
def handle(self, *args, **options):
logger.info("Build started")
# Set options
self.set_options(*args, **options)
# Get the build directory ready
if not options.get("keep_build_dir"):
self.init_build_dir()
# Build up static files
if not option... | Making it happen. |
def set_options(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1))
# Figure out what build directory to use
if options.get("build_dir"):
self.build_dir = options.get("build_dir")
settings.BUILD_DIR = self.build_dir
else:
... | Configure a few global options before things get going. |
def init_build_dir(self):
# Destroy the build directory, if it exists
logger.debug("Initializing %s" % self.build_dir)
if self.verbosity > 1:
self.stdout.write("Initializing build directory")
if self.fs.exists(self.build_dir):
self.fs.removetree(self.buil... | Clear out the build directory and create a new one. |
def build_static(self, *args, **options):
logger.debug("Building static directory")
if self.verbosity > 1:
self.stdout.write("Building static directory")
management.call_command(
"collectstatic",
interactive=False,
verbosity=0
)
... | Builds the static files directory as well as robots.txt and favicon.ico |
def build_media(self):
logger.debug("Building media directory")
if self.verbosity > 1:
self.stdout.write("Building media directory")
if os.path.exists(self.media_root) and settings.MEDIA_URL:
target_dir = path.join(self.fs_name, self.build_dir, settings.MEDIA_URL... | Build the media files. |
def build_views(self):
# Then loop through and run them all
for view_str in self.view_list:
logger.debug("Building %s" % view_str)
if self.verbosity > 1:
self.stdout.write("Building %s" % view_str)
view = get_callable(view_str)
sel... | Bake out specified buildable views. |
def copytree_and_gzip(self, source_dir, target_dir):
# Figure out what we're building...
build_list = []
# Walk through the source directory...
for (dirpath, dirnames, filenames) in os.walk(source_dir):
for f in filenames:
# Figure out what is going w... | Copies the provided source directory to the provided target directory.
Gzips JavaScript, CSS and HTML and other files along the way. |
def copyfile_and_gzip(self, source_path, target_path):
# And then where we want to copy it to.
target_dir = path.dirname(target_path)
if not self.fs.exists(target_dir):
try:
self.fs.makedirs(target_dir)
except OSError:
pass
... | Copies the provided file to the provided target directory.
Gzips JavaScript, CSS and HTML and other files along the way. |
def handle(self, *args, **options):
# Counts and such we can use to keep tabs on this as they progress
self.uploaded_files = 0
self.uploaded_file_list = []
self.deleted_files = 0
self.deleted_file_list = []
self.start_time = time.time()
# Configure all th... | Sync files in the build directory to a specified S3 bucket |
def set_options(self, options):
self.verbosity = int(options.get('verbosity'))
# Will we be gzipping?
self.gzip = getattr(settings, 'BAKERY_GZIP', False)
# And if so what content types will we be gzipping?
self.gzip_content_types = getattr(
settings,
... | Configure all the many options we'll need to make this happen. |
def get_bucket_file_list(self):
logger.debug("Retrieving bucket object list")
paginator = self.s3_client.get_paginator('list_objects')
options = {
'Bucket': self.aws_bucket_name
}
if self.aws_bucket_prefix:
logger.debug("Adding prefix {} to bucket... | Little utility method that handles pagination and returns
all objects in given bucket. |
def get_local_file_list(self):
file_list = []
for (dirpath, dirnames, filenames) in os.walk(self.build_dir):
for fname in filenames:
# relative path, to sync with the S3 key
local_key = os.path.join(
os.path.relpath(dirpath, self.b... | Walk the local build directory and create a list of relative and
absolute paths to files. |
def sync_with_s3(self):
# Create a list to put all the files we're going to update
self.update_list = []
# Figure out which files need to be updated and upload all these files
logger.debug("Comparing {} local files with {} bucket files".format(
len(self.local_file_li... | Walk through our self.local_files list, and match them with the list
of keys in the S3 bucket. |
def get_md5(self, filename):
    """Return the hex md5 checksum of the file at ``filename``."""
    digest = hashlib.md5()
    with open(filename, 'rb') as fh:
        digest.update(fh.read())
    return digest.hexdigest()
def get_multipart_md5(self, filename, chunk_size=8 * 1024 * 1024):
# Loop through the file contents ...
md5s = []
with open(filename, 'rb') as fp:
while True:
# Break it into chunks
data = fp.read(chunk_size)
# Finish when ther... | Returns the md5 checksum of the provided file name after breaking it into chunks.
This is done to mirror the method used by Amazon S3 after a multipart upload. |
def compare_local_file(self, file_key):
# Where is the file?
file_path = os.path.join(self.build_dir, file_key)
# If we're in force_publish mode just add it
if self.force_publish:
self.update_list.append((file_key, file_path))
# And quit now
r... | Compares a local version of a file with what's already published.
If an update is needed, the file's key is added self.update_list. |
def upload_to_s3(self, key, filename):
extra_args = {'ACL': self.acl}
# determine the mimetype of the file
guess = mimetypes.guess_type(filename)
content_type = guess[0]
encoding = guess[1]
if content_type:
extra_args['ContentType'] = content_type
... | Set the content type and gzip headers if applicable
and upload the item to S3 |
def _get_bakery_dynamic_attr(self, attname, obj, args=None, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr) or args:
args = args[:] if args else []
# Check co_argcount rather than t... | Allows subclasses to provide an attribute (say, 'foo') in three
different ways: As a fixed class-level property or as a method
foo(self) or foo(self, obj). The second argument argument 'obj' is
the "subject" of the current Feed invocation. See the Django Feed
documentation for details.
... |
def get_year(self):
    """Return the year from the database in the format expected by the URL."""
    raw_year = super(BuildableYearArchiveView, self).get_year()
    return date(int(raw_year), 1, 1).strftime(self.get_year_format())
def build_year(self, dt):
    """Build the archive page for the year of ``dt``."""
    self.year = str(dt.year)
    logger.debug("Building %s" % self.year)
    self.request = self.create_request(self.get_url())
    # Render the page and write it to the build path.
    self.build_file(self.get_build_path(), self.get_content())
def build_dated_queryset(self):
    """Build pages for every year present in the dated queryset."""
    qs = self.get_dated_queryset()
    # Plain loop: the original used a list comprehension purely for its
    # side effects, building a throwaway list.
    for dt in self.get_date_list(qs):
        self.build_year(dt)
def get_month(self):
    """Return the month from the database in the format expected by the URL."""
    parent = super(BuildableMonthArchiveView, self)
    dt = date(int(parent.get_year()), int(parent.get_month()), 1)
    return dt.strftime(self.get_month_format())
def build_dated_queryset(self):
    """Build pages for every month present in the dated queryset.

    (The original docstring said "years"; this view builds months.)
    """
    qs = self.get_dated_queryset()
    # Plain loop instead of a side-effect-only list comprehension.
    for dt in self.get_date_list(qs):
        self.build_month(dt)
def unbuild_month(self, dt):
self.year = str(dt.year)
self.month = str(dt.month)
logger.debug("Building %s-%s" % (self.year, self.month))
target_path = os.path.split(self.get_build_path())[0]
if self.fs.exists(target_path):
logger.debug("Removing {}".format(t... | Deletes the directory at self.get_build_path. |
def get_year(self):
    """Return the year from the database in the format expected by the URL."""
    raw_year = super(BuildableDayArchiveView, self).get_year()
    first_of_year = date(int(raw_year), 1, 1)
    return first_of_year.strftime(self.get_year_format())
def get_month(self):
    """Return the month from the database in the format expected by the URL."""
    parent = super(BuildableDayArchiveView, self)
    first_of_month = date(int(parent.get_year()), int(parent.get_month()), 1)
    return first_of_month.strftime(self.get_month_format())
def get_day(self):
year = super(BuildableDayArchiveView, self).get_year()
month = super(BuildableDayArchiveView, self).get_month()
day = super(BuildableDayArchiveView, self).get_day()
fmt = self.get_day_format()
dt = date(int(year), int(month), int(day))
return d... | Return the day from the database in the format expected by the URL. |
def get_url(self):
    """The URL at which the detail page should appear.

    By default /archive/ + year + month + day in their respective
    formats, e.g. /archive/2016/01/01.
    """
    parts = ['/archive', self.get_year(), self.get_month(), self.get_day()]
    return os.path.join(*parts)
def get_build_path(self):
    """Return where the page should be built, creating the directory
    if needed.

    By default self.get_url() + "/index.html" under settings.BUILD_DIR.
    Override to build the page at a different location.
    """
    build_dir = path.join(settings.BUILD_DIR, self.get_url().lstrip('/'))
    if not self.fs.exists(build_dir):
        logger.debug("Creating {}".format(build_dir))
        self.fs.makedirs(build_dir)
    return os.path.join(build_dir, 'index.html')
def build_day(self, dt):
self.month = str(dt.month)
self.year = str(dt.year)
self.day = str(dt.day)
logger.debug("Building %s-%s-%s" % (self.year, self.month, self.day))
self.request = self.create_request(self.get_url())
path = self.get_build_path()
self.... | Build the page for the provided day. |
def build_dated_queryset(self):
    """Build pages for every day present in the dated queryset.

    (The original docstring said "years"; this view builds days.)
    """
    qs = self.get_dated_queryset()
    # Plain loop instead of a side-effect-only list comprehension.
    for dt in self.get_date_list(qs, date_type='day'):
        self.build_day(dt)
def get_s3_client():
session_kwargs = {}
if hasattr(settings, 'AWS_ACCESS_KEY_ID'):
session_kwargs['aws_access_key_id'] = settings.AWS_ACCESS_KEY_ID
if hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
session_kwargs['aws_secret_access_key'] = settings.AWS_SECRET_ACCESS_KEY
boto3.setup_de... | A DRY place to make sure AWS credentials in settings override
environment based credentials. Boto3 will fall back to:
http://boto3.readthedocs.io/en/latest/guide/configuration.html |
def get_bucket_page(page):
    """Return a dict mapping S3 key name -> key metadata for one
    paginator page."""
    key_list = page.get('Contents', [])
    logger.debug("Retrieving page with {} keys".format(
        len(key_list),
    ))
    # Dict comprehension is the idiomatic form of dict(genexp).
    return {k.get('Key'): k for k in key_list}
def get_all_objects_in_bucket(
aws_bucket_name,
s3_client=None,
max_keys=1000
):
logger.debug("Retrieving bucket object list")
if not s3_client:
s3_client, s3_resource = get_s3_client()
obj_dict = {}
paginator = s3_client.get_paginator('list_objects')
page_iterat... | Little utility method that handles pagination and returns
all objects in given bucket. |
def batch_delete_s3_objects(
keys,
aws_bucket_name,
chunk_size=100,
s3_client=None
):
if s3_client is None:
s3_client, s3_resource = get_s3_client()
key_chunks = []
for i in range(0, len(keys), chunk_size):
chunk = []
for key in (list(keys)[i:i+10... | Utility method that batch deletes objects in given bucket. |
def lexeme(p):
    """From a parser (or literal string), build a parser that also
    consumes whitespace on either side."""
    parser = string(p) if isinstance(p, str) else p
    whitespace = regex(r'\s*')
    return whitespace >> parser << whitespace
def is_present(p):
    """Given a parser or string, return a parser that yields True when
    it matches and False otherwise."""
    # ``v is not None`` replaces the needlessly verbose
    # ``False if v is None else True``.
    return lexeme(p).optional().map(lambda v: v is not None)
def parse(self, stream):
    """Parse a string or list of tokens, requiring all input to be
    consumed (``self << eof``), and return the result or raise a
    ParseError."""
    (result, _) = (self << eof).parse_partial(stream)
    return result
def parse_partial(self, stream):
    """Parse the longest possible prefix of ``stream``.

    Returns a tuple of (result, remaining input), or raises ParseError.
    """
    outcome = self(stream, 0)
    if not outcome.status:
        raise ParseError(outcome.expected, stream, outcome.furthest)
    return (outcome.value, stream[outcome.index:])
def extract_key_values(array_value, separators=(';', ',', ':'), **kwargs):
    """Serialize an array of flat objects as a single string.

    Items are joined by the first separator, fields within an item by
    the second, and each key/value pair by the third; keys are sorted
    within each item.
    """
    items_sep, fields_sep, keys_sep = separators
    serialized_items = []
    for item in array_value:
        pairs = [keys_sep.join(pair) for pair in sorted(item.items())]
        serialized_items.append(fields_sep.join(pairs))
    return items_sep.join(serialized_items)
def from_schemafile(cls, schemafile):
    """Create an instance from the JSON schema file at ``schemafile``."""
    with open(schemafile) as schema_fp:
        schema = json.load(schema_fp)
    return cls(schema)
def register_serialization_method(self, name, serialize_func):
    """Register a custom serialization method that can be used via
    schema configuration.

    Raises:
        ValueError: if ``name`` would shadow a default method.
    """
    if name in self._default_serialization_methods:
        # Bug fix: the original left "%s" unformatted in the message;
        # interpolate the offending name.
        raise ValueError(
            "Can't replace original %s serialization method" % name)
    self._serialization_methods[name] = serialize_func
def flatten(self, obj):
    """Return a list with the serialized value of every field of ``obj``."""
    values = []
    for field in self.fields:
        values.append(self._serialize(field, obj))
    return values
def flatten_dict(self, obj):
    """Return an OrderedDict of fieldname -> flattened value, preserving
    the order of self.fieldnames."""
    pairs = zip(self.fieldnames, self.flatten(obj))
    return OrderedDict(pairs)
def close(self):
LOGGER.debug('Connection %s closing', self.id)
if self.busy:
raise ConnectionBusyError(self)
with self._lock:
if not self.handle.closed:
try:
self.handle.close()
except psycopg2.InterfaceError a... | Close the connection
:raises: ConnectionBusyError |
def busy(self):
    """Return True if the connection is executing a query or is locked
    by a session that still exists.

    :rtype: bool
    """
    if self.handle.isexecuting():
        return True
    if self.used_by is None:
        return False
    # ``used_by`` is a weakref to the locking session; a dead ref means
    # the session is gone and the connection is no longer busy.
    # ``is not None`` replaces the awkward ``not x() is None``.
    return self.used_by() is not None
def free(self):
    """Remove the session lock on the connection if it is not executing.

    :raises: ConnectionBusyError
    """
    LOGGER.debug('Connection %s freeing', self.id)
    if self.handle.isexecuting():
        raise ConnectionBusyError(self)
    with self._lock:
        # Dropping the weakref releases the connection for reuse.
        self.used_by = None
    LOGGER.debug('Connection %s freed', self.id)
def lock(self, session):
    """Lock the connection for ``session``, ensuring it is not busy.

    Only a weakref to the session is stored, so a session that goes
    away does not pin the connection as busy forever.

    :param queries.Session session: The session to lock the connection with
    :raises: ConnectionBusyError
    """
    if self.busy:
        raise ConnectionBusyError(self)
    with self._lock:
        self.used_by = weakref.ref(session)
    LOGGER.debug('Connection %s locked', self.id)
def add(self, connection):
if id(connection) in self.connections:
raise ValueError('Connection already exists in pool')
if len(self.connections) == self.max_size:
LOGGER.warning('Race condition found when adding new connection')
try:
connectio... | Add a new connection to the pool
:param connection: The connection to add to the pool
:type connection: psycopg2.extensions.connection
:raises: PoolFullError |
def busy_connections(self):
    """Return the open connections that are currently busy.

    :rtype: list
    """
    active = []
    for conn in self.connections.values():
        if conn.busy and not conn.closed:
            active.append(conn)
    return active
def clean(self):
LOGGER.debug('Cleaning the pool')
for connection in [self.connections[k] for k in self.connections if
self.connections[k].closed]:
LOGGER.debug('Removing %s', connection.id)
self.remove(connection.handle)
if self.idle_d... | Clean the pool by removing any closed connections and if the pool's
idle has exceeded its idle TTL, remove all connections. |
def close(self):
    """Close the pool by closing and removing all of its connections."""
    # Snapshot the values: remove() mutates self.connections.
    for conn in list(self.connections.values()):
        self.remove(conn.handle)
    LOGGER.debug('Pool %s closed', self.id)
def free(self, connection):
LOGGER.debug('Pool %s freeing connection %s', self.id, id(connection))
try:
self.connection_handle(connection).free()
except KeyError:
raise ConnectionNotFoundError(self.id, id(connection))
if self.idle_connections == list(self... | Free the connection from use by the session that was using it.
:param connection: The connection to free
:type connection: psycopg2.extensions.connection
:raises: ConnectionNotFoundError |
def get(self, session):
idle = self.idle_connections
if idle:
connection = idle.pop(0)
connection.lock(session)
if self.idle_start:
with self._lock:
self.idle_start = None
return connection.handle
raise ... | Return an idle connection and assign the session to the connection
:param queries.Session session: The session to assign
:rtype: psycopg2.extensions.connection
:raises: NoIdleConnectionsError |
def idle_connections(self):
    """Return the open connections that are not busy.

    :rtype: list
    """
    idle = []
    for conn in self.connections.values():
        if not conn.busy and not conn.closed:
            idle.append(conn)
    return idle
def lock(self, connection, session):
cid = id(connection)
try:
self.connection_handle(connection).lock(session)
except KeyError:
raise ConnectionNotFoundError(self.id, cid)
else:
if self.idle_start:
with self._lock:
... | Explicitly lock the specified connection
:type connection: psycopg2.extensions.connection
:param connection: The connection to lock
:param queries.Session session: The session to hold the lock |
def remove(self, connection):
cid = id(connection)
if cid not in self.connections:
raise ConnectionNotFoundError(self.id, cid)
self.connection_handle(connection).close()
with self._lock:
del self.connections[cid]
LOGGER.debug('Pool %s removed conn... | Remove the connection from the pool
:param connection: The connection to remove
:type connection: psycopg2.extensions.connection
:raises: ConnectionNotFoundError
:raises: ConnectionBusyError |
def report(self):
return {
'connections': {
'busy': len(self.busy_connections),
'closed': len(self.closed_connections),
'executing': len(self.executing_connections),
'idle': len(self.idle_connections),
'locked':... | Return a report about the pool state and configuration.
:rtype: dict |
def shutdown(self):
with self._lock:
for cid in list(self.connections.keys()):
if self.connections[cid].executing:
raise ConnectionBusyError(cid)
if self.connections[cid].locked:
self.connections[cid].free()
... | Forcefully shutdown the entire pool, closing all non-executing
connections.
:raises: ConnectionBusyError |
def add(cls, pid, connection):
    """Add a new connection to the specified pool.

    :param str pid: The pool id
    :type connection: psycopg2.extensions.connection
    :param connection: The connection to add to the pool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.add(connection)
def clean(cls, pid):
with cls._lock:
try:
cls._ensure_pool_exists(pid)
except KeyError:
LOGGER.debug('Pool clean invoked against missing pool %s', pid)
return
cls._pools[pid].clean()
cls._maybe_remove_pool(p... | Clean the specified pool, removing any closed connections or
stale locks.
:param str pid: The pool id to clean |
def create(cls, pid, idle_ttl=DEFAULT_IDLE_TTL, max_size=DEFAULT_MAX_SIZE,
time_method=None):
if pid in cls._pools:
raise KeyError('Pool %s already exists' % pid)
with cls._lock:
LOGGER.debug("Creating Pool: %s (%i/%i)", pid, idle_ttl, max_size)
... | Create a new pool, with the ability to pass in values to override
the default idle TTL and the default maximum size.
A pool's idle TTL defines the amount of time that a pool can be open
without any sessions before it is removed.
A pool's max size defines the maximum number of connectio... |
def free(cls, pid, connection):
    """Free a connection that was locked by a session.

    :param str pid: The pool ID
    :param connection: The connection to remove
    :type connection: psycopg2.extensions.connection
    """
    with cls._lock:
        LOGGER.debug('Freeing %s from pool %s', id(connection), pid)
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.free(connection)
def get(cls, pid, session):
    """Get an idle, unused connection from the pool, marking it in-use
    until it is freed.

    :param str pid: The pool ID
    :param queries.Session session: The session to assign to the connection
    :rtype: psycopg2.extensions.connection
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        return pool.get(session)
def get_connection(cls, pid, connection):
    """Return the pool's Connection wrapper for a raw connection.

    NOTE(review): unlike the sibling classmethods this does not call
    _ensure_pool_exists, so a missing pool raises KeyError — confirm
    that is intended.

    :param str pid: The pool ID
    :type connection: psycopg2.extensions.connection
    :rtype: queries.pool.Connection
    """
    with cls._lock:
        pool = cls._pools[pid]
        return pool.connection_handle(connection)
def has_connection(cls, pid, connection):
    """Check whether a pool holds the specified connection.

    :param str pid: The pool ID
    :param connection: The connection to check for
    :type connection: psycopg2.extensions.connection
    :rtype: bool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        return connection in pool
def has_idle_connection(cls, pid):
    """Check whether a pool has at least one idle connection.

    :param str pid: The pool ID
    :rtype: bool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        return bool(pool.idle_connections)
def is_full(cls, pid):
    """Return whether the specified pool is full.

    :param str pid: The pool id
    :rtype: bool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        return pool.is_full
def lock(cls, pid, connection, session):
    """Explicitly lock the specified connection in the pool.

    :param str pid: The pool id
    :type connection: psycopg2.extensions.connection
    :param connection: The connection to lock
    :param queries.Session session: The session to hold the lock
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.lock(connection, session)
def remove(cls, pid):
    """Remove a pool, closing all of its connections.

    :param str pid: The pool ID
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools.pop(pid)
        pool.close()
def remove_connection(cls, pid, connection):
    """Remove a connection from the pool, closing it if it is open.

    NOTE(review): unlike the sibling classmethods this does not take
    cls._lock — presumably because callers already hold it; confirm.

    :param str pid: The pool ID
    :param connection: The connection to remove
    :type connection: psycopg2.extensions.connection
    :raises: ConnectionNotFoundError
    """
    cls._ensure_pool_exists(pid)
    pool = cls._pools[pid]
    pool.remove(connection)
def set_idle_ttl(cls, pid, ttl):
    """Set the idle TTL for a pool, after which it will be destroyed.

    :param str pid: The pool id
    :param int ttl: The TTL for an idle pool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.set_idle_ttl(ttl)
def set_max_size(cls, pid, size):
    """Set the maximum number of connections for the specified pool.

    :param str pid: The pool to set the size for
    :param int size: The maximum number of connections
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.set_max_size(size)
def shutdown(cls):
    """Shut down every registered pool, closing all pooled connections."""
    # Snapshot in case a pool's shutdown mutates the registry.
    for pool in [cls._pools[pid] for pid in list(cls._pools.keys())]:
        pool.shutdown()
    LOGGER.info('Shutdown complete, all pooled connections closed')
def size(cls, pid):
    """Return the number of connections in the pool.

    :param str pid: The pool id
    :rtype: int
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        return len(pool)
def report(cls):
    """Return the state of all of the registered pools.

    :rtype: dict
    """
    return {
        'timestamp': datetime.datetime.utcnow().isoformat(),
        'process': os.getpid(),
        # Dict comprehension is the idiomatic form of dict([...]).
        'pools': {i: p.report() for i, p in cls._pools.items()}
    }
def _maybe_remove_pool(cls, pid):
if not len(cls._pools[pid]):
del cls._pools[pid] | If the pool has no open connections, remove it
:param str pid: The pool id to clean |
def callproc(self, name, args=None):
    """Call a stored procedure on the server, returning the results in
    a :py:class:`queries.Results` instance.

    :param str name: The procedure name
    :param list args: The list of arguments to pass in
    :rtype: queries.Results
    :raises: psycopg2.Error subclasses raised by the cursor
    """
    try:
        self._cursor.callproc(name, args)
    except psycopg2.Error:
        self._incr_exceptions()
        # Bare ``raise`` preserves the original traceback.
        raise
    finally:
        self._incr_executions()
    # Must sit OUTSIDE the ``finally`` block: a ``return`` inside
    # ``finally`` would silently swallow the re-raised exception.
    return results.Results(self._cursor)
def close(self):
if not self._conn:
raise psycopg2.InterfaceError('Connection not open')
LOGGER.info('Closing connection %r in %s', self._conn, self.pid)
self._pool_manager.free(self.pid, self._conn)
self._pool_manager.remove_connection(self.pid, self._conn)
... | Explicitly close the connection and remove it from the connection
pool if pooling is enabled. If the connection is already closed
:raises: psycopg2.InterfaceError |
def pid(self):
    """Return the pool ID used for connection pooling, derived from the
    class name and connection URI.

    :rtype: str
    """
    key = ':'.join([self.__class__.__name__, self._uri])
    return hashlib.md5(key.encode('utf-8')).hexdigest()
def query(self, sql, parameters=None):
    """Issue a query on the server, mogrifying the parameters against
    the SQL statement, and return a :py:class:`queries.Results`.

    :param str sql: The SQL statement
    :param dict parameters: The query parameters
    :rtype: queries.Results
    :raises: psycopg2.Error subclasses raised by the cursor
    """
    try:
        self._cursor.execute(sql, parameters)
    except psycopg2.Error:
        self._incr_exceptions()
        # Bare ``raise`` preserves the original traceback.
        raise
    finally:
        self._incr_executions()
    # Must sit OUTSIDE the ``finally`` block: a ``return`` inside
    # ``finally`` would silently swallow the re-raised exception.
    return results.Results(self._cursor)
def set_encoding(self, value=DEFAULT_ENCODING):
    """Set the client encoding for the session if the value specified
    differs from the current client encoding.

    :param str value: The encoding value to use
    """
    if self._conn.encoding == value:
        return
    self._conn.set_client_encoding(value)
def _cleanup(self):
if self._cursor:
LOGGER.debug('Closing the cursor on %s', self.pid)
self._cursor.close()
self._cursor = None
if self._conn:
LOGGER.debug('Freeing %s in the pool', self.pid)
try:
pool.PoolManager.inst... | Remove the connection from the stack, closing out the cursor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.