repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
btimby/fulltext | fulltext/__init__.py | backend_from_fobj | python | def backend_from_fobj(f):
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset) | Determine backend module object from a file object. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L482-L496 | [
"def backend_from_mime(mime):\n \"\"\"Determine backend module object from a mime string.\"\"\"\n try:\n mod_name = MIMETYPE_TO_BACKENDS[mime]\n\n except KeyError:\n msg = \"No handler for %r, defaulting to %r\" % (mime, DEFAULT_MIME)\n if 'FULLTEXT_TESTING' in os.environ:\n warn(msg)\n else:\n LOGGER.debug(msg)\n\n mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]\n mod = import_mod(mod_name)\n return mod\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
"""Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text'])
"""
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
def import_mod(mod_name):
return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
encoding_errors, kwargs, _wtitle):
if encoding is None:
encoding = ENCODING
if encoding_errors is None:
encoding_errors = ENCODING_ERRORS
kwargs = kwargs.copy() if kwargs is not None else {}
kwargs.setdefault("mime", mime)
# Find backend module.
if backend is None:
if mime:
backend_mod = backend_from_mime(mime)
elif name:
backend_mod = backend_from_fname(name)
else:
if is_file_path(path_or_file):
backend_mod = backend_from_fname(path_or_file)
else:
if hasattr(path_or_file, "name"):
backend_mod = backend_from_fname(path_or_file.name)
else:
backend_mod = backend_from_fobj(path_or_file)
else:
if isinstance(backend, string_types):
try:
mime = EXTS_TO_MIMETYPES['.' + backend]
except KeyError:
raise ValueError("invalid backend %r" % backend)
backend_mod = backend_from_mime(mime)
else:
backend_mod = backend
# Get backend class.
inst = backend_inst_from_mod(
backend_mod, encoding, encoding_errors, kwargs)
fun = handle_path if is_file_path(path_or_file) else handle_fobj
# Run handle_ function, handle callbacks.
title = None
inst.setup()
try:
text = fun(inst, path_or_file)
if _wtitle:
try:
title = inst.handle_title(path_or_file)
except Exception:
LOGGER.exception("error while getting title (setting to None)")
finally:
inst.teardown()
assert text is not None, "backend function returned None"
text = STRIP_WHITE.sub(' ', text)
text = text.strip()
return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
"""
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
"""
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise
def get_with_title(*args, **kwargs):
"""Like get() but also tries to determine document title.
Returns a (text, title) tuple.
"""
kwargs['_wtitle'] = True
return get(*args, **kwargs)
|
btimby/fulltext | fulltext/__init__.py | backend_inst_from_mod | python | def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst | Given a mod and a set of opts return an instantiated
Backend class. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L499-L519 | [
"def warn(msg):\n warnings.warn(msg, UserWarning, stacklevel=2)\n LOGGER.warning(msg)\n",
"def import_mod(mod_name):\n return __import__(mod_name, fromlist=[' '])\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
"""Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text'])
"""
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
def import_mod(mod_name):
return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
encoding_errors, kwargs, _wtitle):
if encoding is None:
encoding = ENCODING
if encoding_errors is None:
encoding_errors = ENCODING_ERRORS
kwargs = kwargs.copy() if kwargs is not None else {}
kwargs.setdefault("mime", mime)
# Find backend module.
if backend is None:
if mime:
backend_mod = backend_from_mime(mime)
elif name:
backend_mod = backend_from_fname(name)
else:
if is_file_path(path_or_file):
backend_mod = backend_from_fname(path_or_file)
else:
if hasattr(path_or_file, "name"):
backend_mod = backend_from_fname(path_or_file.name)
else:
backend_mod = backend_from_fobj(path_or_file)
else:
if isinstance(backend, string_types):
try:
mime = EXTS_TO_MIMETYPES['.' + backend]
except KeyError:
raise ValueError("invalid backend %r" % backend)
backend_mod = backend_from_mime(mime)
else:
backend_mod = backend
# Get backend class.
inst = backend_inst_from_mod(
backend_mod, encoding, encoding_errors, kwargs)
fun = handle_path if is_file_path(path_or_file) else handle_fobj
# Run handle_ function, handle callbacks.
title = None
inst.setup()
try:
text = fun(inst, path_or_file)
if _wtitle:
try:
title = inst.handle_title(path_or_file)
except Exception:
LOGGER.exception("error while getting title (setting to None)")
finally:
inst.teardown()
assert text is not None, "backend function returned None"
text = STRIP_WHITE.sub(' ', text)
text = text.strip()
return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
        encoding=None, encoding_errors=None, kwargs=None,
        _wtitle=False):
    """
    Extract and return a document's full text.

    Accepts either a filesystem path or a binary file-like object.

    * When `default` is supplied, any extraction error is logged and
      `default` is returned instead of raising.
    * `backend` may be a module object or a string naming one of the
      bundled backends (e.g. "doc"); see the backends directory.
    * `mime` and `name` are hints used to pick a backend when the caller
      already knows them; `mime` wins if both are given.
    * `encoding` / `encoding_errors` control text decoding, mostly for
      pure-python backends that don't shell out to CLI tools. They
      default to "utf8" and "strict" respectively.
    * `kwargs` is forwarded to the chosen backend.
    """
    try:
        text, title = _get(
            path_or_file, default=default, mime=mime, name=name,
            backend=backend, kwargs=kwargs, encoding=encoding,
            encoding_errors=encoding_errors, _wtitle=_wtitle)
    except Exception as e:
        # No fallback requested: propagate the original error.
        if default is SENTINAL:
            raise
        LOGGER.exception(e)
        return default
    return (text, title) if _wtitle else text
def get_with_title(*args, **kwargs):
    """Same as get(), but the result is a (text, title) tuple.

    Title extraction is best-effort; title may be None.
    """
    return get(*args, **dict(kwargs, _wtitle=True))
|
btimby/fulltext | fulltext/__init__.py | get | python | def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise | Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L585-L618 | [
"def _get(path_or_file, default, mime, name, backend, encoding,\n encoding_errors, kwargs, _wtitle):\n if encoding is None:\n encoding = ENCODING\n if encoding_errors is None:\n encoding_errors = ENCODING_ERRORS\n\n kwargs = kwargs.copy() if kwargs is not None else {}\n kwargs.setdefault(\"mime\", mime)\n\n # Find backend module.\n if backend is None:\n if mime:\n backend_mod = backend_from_mime(mime)\n elif name:\n backend_mod = backend_from_fname(name)\n else:\n if is_file_path(path_or_file):\n backend_mod = backend_from_fname(path_or_file)\n else:\n if hasattr(path_or_file, \"name\"):\n backend_mod = backend_from_fname(path_or_file.name)\n else:\n backend_mod = backend_from_fobj(path_or_file)\n else:\n if isinstance(backend, string_types):\n try:\n mime = EXTS_TO_MIMETYPES['.' + backend]\n except KeyError:\n raise ValueError(\"invalid backend %r\" % backend)\n backend_mod = backend_from_mime(mime)\n else:\n backend_mod = backend\n\n # Get backend class.\n inst = backend_inst_from_mod(\n backend_mod, encoding, encoding_errors, kwargs)\n fun = handle_path if is_file_path(path_or_file) else handle_fobj\n\n # Run handle_ function, handle callbacks.\n title = None\n inst.setup()\n try:\n text = fun(inst, path_or_file)\n if _wtitle:\n try:\n title = inst.handle_title(path_or_file)\n except Exception:\n LOGGER.exception(\"error while getting title (setting to None)\")\n finally:\n inst.teardown()\n\n assert text is not None, \"backend function returned None\"\n text = STRIP_WHITE.sub(' ', text)\n text = text.strip()\n return (text, title)\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
"""Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text'])
"""
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
def import_mod(mod_name):
return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
encoding_errors, kwargs, _wtitle):
if encoding is None:
encoding = ENCODING
if encoding_errors is None:
encoding_errors = ENCODING_ERRORS
kwargs = kwargs.copy() if kwargs is not None else {}
kwargs.setdefault("mime", mime)
# Find backend module.
if backend is None:
if mime:
backend_mod = backend_from_mime(mime)
elif name:
backend_mod = backend_from_fname(name)
else:
if is_file_path(path_or_file):
backend_mod = backend_from_fname(path_or_file)
else:
if hasattr(path_or_file, "name"):
backend_mod = backend_from_fname(path_or_file.name)
else:
backend_mod = backend_from_fobj(path_or_file)
else:
if isinstance(backend, string_types):
try:
mime = EXTS_TO_MIMETYPES['.' + backend]
except KeyError:
raise ValueError("invalid backend %r" % backend)
backend_mod = backend_from_mime(mime)
else:
backend_mod = backend
# Get backend class.
inst = backend_inst_from_mod(
backend_mod, encoding, encoding_errors, kwargs)
fun = handle_path if is_file_path(path_or_file) else handle_fobj
# Run handle_ function, handle callbacks.
title = None
inst.setup()
try:
text = fun(inst, path_or_file)
if _wtitle:
try:
title = inst.handle_title(path_or_file)
except Exception:
LOGGER.exception("error while getting title (setting to None)")
finally:
inst.teardown()
assert text is not None, "backend function returned None"
text = STRIP_WHITE.sub(' ', text)
text = text.strip()
return (text, title)
def get_with_title(*args, **kwargs):
"""Like get() but also tries to determine document title.
Returns a (text, title) tuple.
"""
kwargs['_wtitle'] = True
return get(*args, **kwargs)
|
btimby/fulltext | fulltext/util.py | memoize | python | def memoize(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper | A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>> | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/util.py#L209-L238 | null | from __future__ import print_function
import contextlib
import atexit
import errno
import logging
import os
import subprocess
import warnings
import sys
import functools
import tempfile
import shutil
from os.path import join as pathjoin
import six
from six import PY3
try:
import exiftool
except ImportError:
exiftool = None
from fulltext.compat import which
LOGGER = logging.getLogger(__file__)
LOGGER.addHandler(logging.NullHandler())
TEMPDIR = os.environ.get('FULLTEXT_TEMP', tempfile.gettempdir())
HERE = os.path.abspath(os.path.dirname(__file__))
class BackendError(AssertionError):
pass
class CommandLineError(Exception):
"""The traceback of all CommandLineError's is supressed when the
errors occur on the command line to provide a useful command line
interface.
"""
def render(self, msg):
return msg % vars(self)
class MissingCommandException(CommandLineError):
def __init__(self, cmd, msg=""):
self.cmd = cmd
self.msg = msg
def __str__(self):
if self.msg:
return self.msg
else:
return "%r CLI tool is not installed" % self.cmd
class ShellError(CommandLineError):
"""This error is raised when a shell.run returns a non-zero exit code
(meaning the command failed).
"""
def __init__(self, command, exit_code, stdout, stderr):
self.command = command
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
self.executable = self.command.split()[0]
def failed_message(self):
return (
"The command `%(command)s` failed with exit code %(exit_code)d\n"
"------------- stdout -------------\n"
"%(stdout)s"
"------------- stderr -------------\n"
"%(stderr)s"
) % vars(self)
def __str__(self):
return self.failed_message()
def run(*cmd, **kwargs):
stdin = kwargs.get('stdin', None)
# run a subprocess and put the stdout and stderr on the pipe object
try:
pipe = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
except IOError as e:
if e.errno == errno.ENOENT:
raise MissingCommandException(cmd[0])
raise
except OSError as e:
if e.errno == errno.ENOENT:
# File not found.
# This is equivalent to getting exitcode 127 from sh
raise MissingCommandException(cmd[0])
try:
# pipe.wait() ends up hanging on large files. using
# pipe.communicate appears to avoid this issue
stdout, stderr = pipe.communicate()
if stderr:
if PY3:
warn(stderr.decode(sys.getfilesystemencoding(), "ignore"))
else:
warn(stderr)
# if pipe is busted, raise an error (unlike Fabric)
if pipe.returncode != 0:
raise ShellError(' '.join(cmd), pipe.returncode, stdout, stderr)
return stdout
finally:
if pipe.stdout:
pipe.stdout.close()
if pipe.stderr:
pipe.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if pipe.stdin:
pipe.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
pipe.wait()
def warn(msg):
warnings.warn(msg, UserWarning, stacklevel=2)
LOGGER.warning(msg)
def is_windows():
"""True if the platform is Windows."""
return os.name == 'nt'
def is_windows64():
"""
Determine if platform is 64 bit Windows.
"""
return is_windows() and 'PROGRAMFILES(X86)' in os.environ
def get_data_dir():
# When running under PyInstaller things are a bit different.
if hasattr(sys, '_MEIPASS'):
path = pathjoin(sys._MEIPASS, 'fulltext', 'data')
# XXX: this absolutely ugly hack is needed in order to build
# duster with pyinstaller.
if not os.path.isdir(path):
print(">>> WARN: assuming you're using pyinstaller from duster",
file=sys.stderr)
path = pathjoin(sys._MEIPASS, 'duster', 'data')
else:
path = pathjoin(HERE, 'data')
assert os.path.isdir(path), path
return path
def assert_cmd_exists(cmd):
if not which(cmd):
raise MissingCommandException(cmd)
if not is_windows():
# On linux things are simpler. Linter disabled for next line since we
# import here for export.
import magic # NOQA
else:
def _set_binpath():
# Help the magic wrapper locate magic1.dll, we include it in
# bin/bin{32,64}.
bindir = 'bin64' if is_windows64() else 'bin32'
path = pathjoin(get_data_dir(), bindir)
os.environ['PATH'] += os.pathsep + path
assert_cmd_exists("pdftotext")
assert_cmd_exists("unrtf")
assert_cmd_exists("exiftool")
assert_cmd_exists("unrar")
_set_binpath()
def _import_magic():
# Instantiate our own Magic instance so we can tell it where the
# magic file lives.
from magic import Magic as _Magic
class Magic(_Magic):
# Overridden because differently from the UNIX version
# the Windows version does not provide mime kwarg.
def from_file(self, filename, mime=True):
return _Magic.from_file(self, filename)
def from_buffer(self, buf, mime=True):
return _Magic.from_buffer(self, buf)
path = pathjoin(get_data_dir(), 'magic')
assert os.path.isfile(path), path
return Magic(mime=True, magic_file=path)
magic = _import_magic()
@memoize
def term_supports_colors():
try:
import curses
assert sys.stderr.isatty()
curses.setupterm()
assert curses.tigetnum("colors") > 0
except Exception:
return False
else:
return True
def hilite(s, ok=True, bold=False):
"""Return an highlighted version of 'string'."""
if not term_supports_colors():
return s
attr = []
if ok is None: # no color
pass
elif ok: # green
attr.append('32')
else: # red
attr.append('31')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s)
def is_file_path(obj):
"""Return True if obj is a possible file path or name."""
return isinstance(obj, six.string_types) or isinstance(obj, bytes)
@contextlib.contextmanager
def fobj_to_tempfile(f, suffix=''):
"""Context manager which copies a file object to disk and return its
name. When done the file is deleted.
"""
with tempfile.NamedTemporaryFile(
dir=TEMPDIR, suffix=suffix, delete=False) as t:
shutil.copyfileobj(f, t)
try:
yield t.name
finally:
os.remove(t.name)
if exiftool is not None:
_et = exiftool.ExifTool()
_et.start()
@atexit.register
def _close_et():
LOGGER.debug("terminating exiftool subprocess")
_et.terminate()
def exiftool_title(path, encoding, encoding_error):
if is_file_path(path):
title = (_et.get_tag("title", path) or "").strip()
if title:
if hasattr(title, "decode"): # PY2
return title.decode(encoding, encoding_error)
else:
return title
else:
# TODO: according to https://www.sno.phy.queensu.ca/~phil/exiftool/
# exiftool is also available on Windows
def exiftool_title(*a, **kw):
return None
class BaseBackend(object):
"""Base class for defining custom backend classes."""
def __init__(self, encoding, encoding_errors, kwargs):
"""These are the same args passed to get() function."""
self.encoding = encoding
self.encoding_errors = encoding_errors
self.kwargs = kwargs
def setup(self):
"""May be overridden by subclass. This is called before handle_
methods.
"""
pass
def teardown(self):
"""May be overridden by subclass. This is called after text
is extracted, also in case of exception.
"""
pass
def check(self, title):
"""May be overridden by subclass. This is called before text
extraction. If the overriding method raises an exception
a warning is printed and bin backend is used.
"""
pass
def decode(self, s):
"""Decode string."""
return s.decode(self.encoding, self.encoding_errors)
def handle_title(self, path_or_file):
"""May be overridden by sublass in order to retrieve file title."""
return None
|
btimby/fulltext | fulltext/util.py | hilite | python | def hilite(s, ok=True, bold=False):
if not term_supports_colors():
return s
attr = []
if ok is None: # no color
pass
elif ok: # green
attr.append('32')
else: # red
attr.append('31')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s) | Return an highlighted version of 'string'. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/util.py#L254-L267 | null | from __future__ import print_function
import contextlib
import atexit
import errno
import logging
import os
import subprocess
import warnings
import sys
import functools
import tempfile
import shutil
from os.path import join as pathjoin
import six
from six import PY3
try:
import exiftool
except ImportError:
exiftool = None
from fulltext.compat import which
LOGGER = logging.getLogger(__file__)
LOGGER.addHandler(logging.NullHandler())
TEMPDIR = os.environ.get('FULLTEXT_TEMP', tempfile.gettempdir())
HERE = os.path.abspath(os.path.dirname(__file__))
class BackendError(AssertionError):
pass
class CommandLineError(Exception):
"""The traceback of all CommandLineError's is supressed when the
errors occur on the command line to provide a useful command line
interface.
"""
def render(self, msg):
return msg % vars(self)
class MissingCommandException(CommandLineError):
def __init__(self, cmd, msg=""):
self.cmd = cmd
self.msg = msg
def __str__(self):
if self.msg:
return self.msg
else:
return "%r CLI tool is not installed" % self.cmd
class ShellError(CommandLineError):
"""This error is raised when a shell.run returns a non-zero exit code
(meaning the command failed).
"""
def __init__(self, command, exit_code, stdout, stderr):
self.command = command
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
self.executable = self.command.split()[0]
def failed_message(self):
return (
"The command `%(command)s` failed with exit code %(exit_code)d\n"
"------------- stdout -------------\n"
"%(stdout)s"
"------------- stderr -------------\n"
"%(stderr)s"
) % vars(self)
def __str__(self):
return self.failed_message()
def run(*cmd, **kwargs):
stdin = kwargs.get('stdin', None)
# run a subprocess and put the stdout and stderr on the pipe object
try:
pipe = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
except IOError as e:
if e.errno == errno.ENOENT:
raise MissingCommandException(cmd[0])
raise
except OSError as e:
if e.errno == errno.ENOENT:
# File not found.
# This is equivalent to getting exitcode 127 from sh
raise MissingCommandException(cmd[0])
try:
# pipe.wait() ends up hanging on large files. using
# pipe.communicate appears to avoid this issue
stdout, stderr = pipe.communicate()
if stderr:
if PY3:
warn(stderr.decode(sys.getfilesystemencoding(), "ignore"))
else:
warn(stderr)
# if pipe is busted, raise an error (unlike Fabric)
if pipe.returncode != 0:
raise ShellError(' '.join(cmd), pipe.returncode, stdout, stderr)
return stdout
finally:
if pipe.stdout:
pipe.stdout.close()
if pipe.stderr:
pipe.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if pipe.stdin:
pipe.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
pipe.wait()
def warn(msg):
warnings.warn(msg, UserWarning, stacklevel=2)
LOGGER.warning(msg)
def is_windows():
"""True if the platform is Windows."""
return os.name == 'nt'
def is_windows64():
"""
Determine if platform is 64 bit Windows.
"""
return is_windows() and 'PROGRAMFILES(X86)' in os.environ
def get_data_dir():
# When running under PyInstaller things are a bit different.
if hasattr(sys, '_MEIPASS'):
path = pathjoin(sys._MEIPASS, 'fulltext', 'data')
# XXX: this absolutely ugly hack is needed in order to build
# duster with pyinstaller.
if not os.path.isdir(path):
print(">>> WARN: assuming you're using pyinstaller from duster",
file=sys.stderr)
path = pathjoin(sys._MEIPASS, 'duster', 'data')
else:
path = pathjoin(HERE, 'data')
assert os.path.isdir(path), path
return path
def assert_cmd_exists(cmd):
if not which(cmd):
raise MissingCommandException(cmd)
if not is_windows():
# On linux things are simpler. Linter disabled for next line since we
# import here for export.
import magic # NOQA
else:
def _set_binpath():
# Help the magic wrapper locate magic1.dll, we include it in
# bin/bin{32,64}.
bindir = 'bin64' if is_windows64() else 'bin32'
path = pathjoin(get_data_dir(), bindir)
os.environ['PATH'] += os.pathsep + path
assert_cmd_exists("pdftotext")
assert_cmd_exists("unrtf")
assert_cmd_exists("exiftool")
assert_cmd_exists("unrar")
_set_binpath()
def _import_magic():
# Instantiate our own Magic instance so we can tell it where the
# magic file lives.
from magic import Magic as _Magic
class Magic(_Magic):
# Overridden because differently from the UNIX version
# the Windows version does not provide mime kwarg.
def from_file(self, filename, mime=True):
return _Magic.from_file(self, filename)
def from_buffer(self, buf, mime=True):
return _Magic.from_buffer(self, buf)
path = pathjoin(get_data_dir(), 'magic')
assert os.path.isfile(path), path
return Magic(mime=True, magic_file=path)
magic = _import_magic()
def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper
@memoize
def term_supports_colors():
try:
import curses
assert sys.stderr.isatty()
curses.setupterm()
assert curses.tigetnum("colors") > 0
except Exception:
return False
else:
return True
def is_file_path(obj):
"""Return True if obj is a possible file path or name."""
return isinstance(obj, six.string_types) or isinstance(obj, bytes)
def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper
@contextlib.contextmanager
def fobj_to_tempfile(f, suffix=''):
"""Context manager which copies a file object to disk and return its
name. When done the file is deleted.
"""
with tempfile.NamedTemporaryFile(
dir=TEMPDIR, suffix=suffix, delete=False) as t:
shutil.copyfileobj(f, t)
try:
yield t.name
finally:
os.remove(t.name)
if exiftool is not None:
_et = exiftool.ExifTool()
_et.start()
@atexit.register
def _close_et():
LOGGER.debug("terminating exiftool subprocess")
_et.terminate()
def exiftool_title(path, encoding, encoding_error):
if is_file_path(path):
title = (_et.get_tag("title", path) or "").strip()
if title:
if hasattr(title, "decode"): # PY2
return title.decode(encoding, encoding_error)
else:
return title
else:
# TODO: according to https://www.sno.phy.queensu.ca/~phil/exiftool/
# exiftool is also available on Windows
def exiftool_title(*a, **kw):
return None
class BaseBackend(object):
"""Base class for defining custom backend classes."""
def __init__(self, encoding, encoding_errors, kwargs):
"""These are the same args passed to get() function."""
self.encoding = encoding
self.encoding_errors = encoding_errors
self.kwargs = kwargs
def setup(self):
"""May be overridden by subclass. This is called before handle_
methods.
"""
pass
def teardown(self):
"""May be overridden by subclass. This is called after text
is extracted, also in case of exception.
"""
pass
def check(self, title):
"""May be overridden by subclass. This is called before text
extraction. If the overriding method raises an exception
a warning is printed and bin backend is used.
"""
pass
def decode(self, s):
"""Decode string."""
return s.decode(self.encoding, self.encoding_errors)
def handle_title(self, path_or_file):
"""May be overridden by sublass in order to retrieve file title."""
return None
|
btimby/fulltext | fulltext/util.py | fobj_to_tempfile | python | def fobj_to_tempfile(f, suffix=''):
with tempfile.NamedTemporaryFile(
dir=TEMPDIR, suffix=suffix, delete=False) as t:
shutil.copyfileobj(f, t)
try:
yield t.name
finally:
os.remove(t.name) | Context manager which copies a file object to disk and return its
name. When done the file is deleted. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/util.py#L308-L318 | null | from __future__ import print_function
import contextlib
import atexit
import errno
import logging
import os
import subprocess
import warnings
import sys
import functools
import tempfile
import shutil
from os.path import join as pathjoin
import six
from six import PY3
try:
import exiftool
except ImportError:
exiftool = None
from fulltext.compat import which
LOGGER = logging.getLogger(__file__)
LOGGER.addHandler(logging.NullHandler())
TEMPDIR = os.environ.get('FULLTEXT_TEMP', tempfile.gettempdir())
HERE = os.path.abspath(os.path.dirname(__file__))
class BackendError(AssertionError):
pass
class CommandLineError(Exception):
"""The traceback of all CommandLineError's is supressed when the
errors occur on the command line to provide a useful command line
interface.
"""
def render(self, msg):
return msg % vars(self)
class MissingCommandException(CommandLineError):
def __init__(self, cmd, msg=""):
self.cmd = cmd
self.msg = msg
def __str__(self):
if self.msg:
return self.msg
else:
return "%r CLI tool is not installed" % self.cmd
class ShellError(CommandLineError):
"""This error is raised when a shell.run returns a non-zero exit code
(meaning the command failed).
"""
def __init__(self, command, exit_code, stdout, stderr):
self.command = command
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
self.executable = self.command.split()[0]
def failed_message(self):
return (
"The command `%(command)s` failed with exit code %(exit_code)d\n"
"------------- stdout -------------\n"
"%(stdout)s"
"------------- stderr -------------\n"
"%(stderr)s"
) % vars(self)
def __str__(self):
return self.failed_message()
def run(*cmd, **kwargs):
stdin = kwargs.get('stdin', None)
# run a subprocess and put the stdout and stderr on the pipe object
try:
pipe = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
except IOError as e:
if e.errno == errno.ENOENT:
raise MissingCommandException(cmd[0])
raise
except OSError as e:
if e.errno == errno.ENOENT:
# File not found.
# This is equivalent to getting exitcode 127 from sh
raise MissingCommandException(cmd[0])
try:
# pipe.wait() ends up hanging on large files. using
# pipe.communicate appears to avoid this issue
stdout, stderr = pipe.communicate()
if stderr:
if PY3:
warn(stderr.decode(sys.getfilesystemencoding(), "ignore"))
else:
warn(stderr)
# if pipe is busted, raise an error (unlike Fabric)
if pipe.returncode != 0:
raise ShellError(' '.join(cmd), pipe.returncode, stdout, stderr)
return stdout
finally:
if pipe.stdout:
pipe.stdout.close()
if pipe.stderr:
pipe.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if pipe.stdin:
pipe.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
pipe.wait()
def warn(msg):
warnings.warn(msg, UserWarning, stacklevel=2)
LOGGER.warning(msg)
def is_windows():
"""True if the platform is Windows."""
return os.name == 'nt'
def is_windows64():
"""
Determine if platform is 64 bit Windows.
"""
return is_windows() and 'PROGRAMFILES(X86)' in os.environ
def get_data_dir():
# When running under PyInstaller things are a bit different.
if hasattr(sys, '_MEIPASS'):
path = pathjoin(sys._MEIPASS, 'fulltext', 'data')
# XXX: this absolutely ugly hack is needed in order to build
# duster with pyinstaller.
if not os.path.isdir(path):
print(">>> WARN: assuming you're using pyinstaller from duster",
file=sys.stderr)
path = pathjoin(sys._MEIPASS, 'duster', 'data')
else:
path = pathjoin(HERE, 'data')
assert os.path.isdir(path), path
return path
def assert_cmd_exists(cmd):
if not which(cmd):
raise MissingCommandException(cmd)
if not is_windows():
# On linux things are simpler. Linter disabled for next line since we
# import here for export.
import magic # NOQA
else:
def _set_binpath():
# Help the magic wrapper locate magic1.dll, we include it in
# bin/bin{32,64}.
bindir = 'bin64' if is_windows64() else 'bin32'
path = pathjoin(get_data_dir(), bindir)
os.environ['PATH'] += os.pathsep + path
assert_cmd_exists("pdftotext")
assert_cmd_exists("unrtf")
assert_cmd_exists("exiftool")
assert_cmd_exists("unrar")
_set_binpath()
def _import_magic():
# Instantiate our own Magic instance so we can tell it where the
# magic file lives.
from magic import Magic as _Magic
class Magic(_Magic):
# Overridden because differently from the UNIX version
# the Windows version does not provide mime kwarg.
def from_file(self, filename, mime=True):
return _Magic.from_file(self, filename)
def from_buffer(self, buf, mime=True):
return _Magic.from_buffer(self, buf)
path = pathjoin(get_data_dir(), 'magic')
assert os.path.isfile(path), path
return Magic(mime=True, magic_file=path)
magic = _import_magic()
def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper
@memoize
def term_supports_colors():
try:
import curses
assert sys.stderr.isatty()
curses.setupterm()
assert curses.tigetnum("colors") > 0
except Exception:
return False
else:
return True
def hilite(s, ok=True, bold=False):
"""Return an highlighted version of 'string'."""
if not term_supports_colors():
return s
attr = []
if ok is None: # no color
pass
elif ok: # green
attr.append('32')
else: # red
attr.append('31')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s)
def is_file_path(obj):
"""Return True if obj is a possible file path or name."""
return isinstance(obj, six.string_types) or isinstance(obj, bytes)
def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper
@contextlib.contextmanager
if exiftool is not None:
_et = exiftool.ExifTool()
_et.start()
@atexit.register
def _close_et():
LOGGER.debug("terminating exiftool subprocess")
_et.terminate()
def exiftool_title(path, encoding, encoding_error):
if is_file_path(path):
title = (_et.get_tag("title", path) or "").strip()
if title:
if hasattr(title, "decode"): # PY2
return title.decode(encoding, encoding_error)
else:
return title
else:
# TODO: according to https://www.sno.phy.queensu.ca/~phil/exiftool/
# exiftool is also available on Windows
def exiftool_title(*a, **kw):
return None
class BaseBackend(object):
"""Base class for defining custom backend classes."""
def __init__(self, encoding, encoding_errors, kwargs):
"""These are the same args passed to get() function."""
self.encoding = encoding
self.encoding_errors = encoding_errors
self.kwargs = kwargs
def setup(self):
"""May be overridden by subclass. This is called before handle_
methods.
"""
pass
def teardown(self):
"""May be overridden by subclass. This is called after text
is extracted, also in case of exception.
"""
pass
def check(self, title):
"""May be overridden by subclass. This is called before text
extraction. If the overriding method raises an exception
a warning is printed and bin backend is used.
"""
pass
def decode(self, s):
"""Decode string."""
return s.decode(self.encoding, self.encoding_errors)
def handle_title(self, path_or_file):
"""May be overridden by sublass in order to retrieve file title."""
return None
|
btimby/fulltext | fulltext/data/winmake.py | safe_print | python | def safe_print(text, file=sys.stdout, flush=False):
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n") | Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L63-L80 | null | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/data/winmake.py | rm | python | def rm(pattern):
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path) | Recursively remove a file or dir by pattern. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L106-L122 | [
"def safe_print(text, file=sys.stdout, flush=False):\n \"\"\"Prints a (unicode) string to the console, encoded depending on\n the stdout/file encoding (eg. cp437 on Windows). This is to avoid\n encoding errors in case of funky path names.\n Works with Python 2 and 3.\n \"\"\"\n if not isinstance(text, basestring):\n return print(text, file=file)\n try:\n file.write(text)\n except UnicodeEncodeError:\n bytes_string = text.encode(file.encoding, 'backslashreplace')\n if hasattr(file, 'buffer'):\n file.buffer.write(bytes_string)\n else:\n text = bytes_string.decode(file.encoding, 'strict')\n file.write(text)\n file.write(\"\\n\")\n"
] | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/data/winmake.py | help | python | def help():
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1) | Print this help | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L149-L155 | [
"def safe_print(text, file=sys.stdout, flush=False):\n \"\"\"Prints a (unicode) string to the console, encoded depending on\n the stdout/file encoding (eg. cp437 on Windows). This is to avoid\n encoding errors in case of funky path names.\n Works with Python 2 and 3.\n \"\"\"\n if not isinstance(text, basestring):\n return print(text, file=file)\n try:\n file.write(text)\n except UnicodeEncodeError:\n bytes_string = text.encode(file.encoding, 'backslashreplace')\n if hasattr(file, 'buffer'):\n file.buffer.write(bytes_string)\n else:\n text = bytes_string.decode(file.encoding, 'strict')\n file.write(text)\n file.write(\"\\n\")\n"
] | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/data/winmake.py | clean | python | def clean():
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv") | Deletes dev files | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L200-L222 | [
"def rm(pattern):\n \"\"\"Recursively remove a file or dir by pattern.\"\"\"\n paths = glob.glob(pattern)\n for path in paths:\n if path.startswith('.git/'):\n continue\n if os.path.isdir(path):\n def onerror(fun, path, excinfo):\n exc = excinfo[1]\n if exc.errno != errno.ENOENT:\n raise\n\n safe_print(\"rmdir -f %s\" % path)\n shutil.rmtree(path, onerror=onerror)\n else:\n safe_print(\"rm %s\" % path)\n os.remove(path)\n"
] | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/data/winmake.py | lint | python | def lint():
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True) | Run flake8 against all py files | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L234-L241 | [
"def sh(cmd, nolog=False):\n if not nolog:\n safe_print(\"cmd: \" + cmd)\n p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),\n stdout=subprocess.PIPE)\n out, _ = p.communicate()\n if PY3:\n out = out.decode(sys.stdout.encoding, sys.stdout.errors)\n print(out)\n if p.returncode != 0:\n sys.exit(p.returncode)\n return out\n"
] | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/data/winmake.py | coverage | python | def coverage():
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON) | Run coverage tests. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L261-L269 | [
"def sh(cmd, nolog=False):\n if not nolog:\n safe_print(\"cmd: \" + cmd)\n p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),\n stdout=subprocess.PIPE)\n out, _ = p.communicate()\n if PY3:\n out = out.decode(sys.stdout.encoding, sys.stdout.errors)\n print(out)\n if p.returncode != 0:\n sys.exit(p.returncode)\n return out\n",
"def test_setup():\n os.environ['PYTHONWARNINGS'] = 'all'\n"
] | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/data/winmake.py | venv | python | def venv():
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT)) | Install venv + deps. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L311-L319 | [
"def sh(cmd, nolog=False):\n if not nolog:\n safe_print(\"cmd: \" + cmd)\n p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),\n stdout=subprocess.PIPE)\n out, _ = p.communicate()\n if PY3:\n out = out.decode(sys.stdout.encoding, sys.stdout.errors)\n print(out)\n if p.returncode != 0:\n sys.exit(p.returncode)\n return out\n"
] | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import glob
import functools
import os
import shutil
import site
import subprocess
import sys
# --- configurable
PRJNAME = "fulltext"
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_SCRIPT = 'fulltext\\test\\__init__.py'
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DATA_DIR = os.path.join(ROOT_DIR, PRJNAME, "data")
REQUIREMENTS_TXT = "requirements.txt"
# --- others
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
PYTHON = sys.executable
PY3 = sys.version_info[0] == 3
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd(),
stdout=subprocess.PIPE)
out, _ = p.communicate()
if PY3:
out = out.decode(sys.stdout.encoding, sys.stdout.errors)
print(out)
if p.returncode != 0:
sys.exit(p.returncode)
return out
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
def install_pip():
try:
import pip # NOQA
except ImportError:
sh("%s %s" % (PYTHON,
os.path.join(DATA_DIR, "get-pip.py")))
def install_setuptools():
try:
import setuptools # NOQA
except ImportError:
sh('%s -c "import setuptools"' % PYTHON)
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
install_setuptools()
sh("%s setup.py build" % PYTHON)
sh("%s setup.py build_ext -i" % PYTHON)
sh('%s -c "import %s"' % (PYTHON, PRJNAME))
@cmd
def install():
"""Install in develop / edit mode"""
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall %s""" % PRJNAME
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
__import__(PRJNAME, fromlist=[' '])
except ImportError:
break
else:
sh("%s -m pip uninstall -y %s" % (PYTHON, PRJNAME))
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith(PRJNAME):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
@cmd
def pydeps():
"""Install useful deps"""
install_pip()
install_setuptools()
sh("%s -m pip install -U -r %s" % (PYTHON, REQUIREMENTS_TXT))
@cmd
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TEST_SCRIPT))
@cmd
def ci():
"""Run CI tests."""
pydeps()
test()
pyinstaller()
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def is_windows64():
return 'PROGRAMFILES(X86)' in os.environ
@cmd
def pyinstaller():
"""Creates a stand alone Windows as dist/%s.exe.""" % PRJNAME
def assertMultiLineEqual(a, b):
import unittest
tc = unittest.TestCase('__init__')
tc.assertMultiLineEqual(a, b)
def install_deps():
sh("venv\\Scripts\\python -m pip install pyinstaller pypiwin32")
sh("venv\\Scripts\\python -m pip install "
"https://github.com/mattgwwalker/msg-extractor/zipball/"
"master#egg=ExtractMsg")
sh("venv\\Scripts\\python setup.py install")
def run_pyinstaller():
rm(os.path.join(ROOT_DIR, "dist"))
bindir = os.path.join(
DATA_DIR, "bin64" if is_windows64() else "bin32")
assert os.path.exists(bindir), bindir
sh("venv\\Scripts\\pyinstaller --upx-dir=%s pyinstaller.spec" % bindir)
def test_exe():
# Make sure the resulting .exe works.
exe = os.path.join(ROOT_DIR, "dist", "%s.exe" % PRJNAME)
assert os.path.exists(exe), exe
# Test those extensions for which we know we rely on external exes.
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.pdf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
out = sh("%s extract %s" % (
exe, os.path.join(ROOT_DIR, "fulltext/test/files/test.rtf")))
assertMultiLineEqual(out.strip(), TEXT.strip())
venv()
install_deps()
run_pyinstaller()
test_exe()
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.close | python | async def close(self):
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None | Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L96-L106 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.execute | python | async def execute(self, sql, *params):
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self | Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L108-L122 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.executemany | python | def executemany(self, sql, *params):
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut | Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L124-L132 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.fetchmany | python | def fetchmany(self, size):
fut = self._run_operation(self._impl.fetchmany, size)
return fut | Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L169-L184 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.tables | python | def tables(self, **kw):
fut = self._run_operation(self._impl.tables, **kw)
return fut | Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ... | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L200-L210 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.columns | python | def columns(self, **kw):
fut = self._run_operation(self._impl.columns, **kw)
return fut | Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L212-L223 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.statistics | python | def statistics(self, catalog=None, schema=None, unique=False, quick=True):
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut | Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L225-L238 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.rowIdColumns | python | def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut | Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L240-L248 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.primaryKeys | python | def primaryKeys(self, table, catalog=None, schema=None): # nopep8
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut | Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L261-L266 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.foreignKeys | python | def foreignKeys(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut | Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L268-L276 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.getTypeInfo | python | def getTypeInfo(self, sql_type): # nopep8
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut | Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L278-L284 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def procedures(self, *a, **kw):
"""Executes SQLProcedures and creates a result set of information
about the procedures in the data source.
"""
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/cursor.py | Cursor.procedures | python | def procedures(self, *a, **kw):
fut = self._run_operation(self._impl.procedures, *a, **kw)
return fut | Executes SQLProcedures and creates a result set of information
about the procedures in the data source. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L286-L291 | [
"def _run_operation(self, func, *args, **kwargs):\n # execute func in thread pool of attached to cursor connection\n if not self._conn:\n raise pyodbc.OperationalError('Cursor is closed.')\n future = self._conn._execute(func, *args, **kwargs)\n return future\n"
] | class Cursor:
"""Cursors represent a database cursor (and map to ODBC HSTMTs), which
is used to manage the context of a fetch operation.
Cursors created from the same connection are not isolated, i.e., any
changes made to the database by a cursor are immediately visible by
the other cursors.
"""
def __init__(self, pyodbc_cursor, connection, echo=False):
self._conn = connection
self._impl = pyodbc_cursor
self._loop = connection.loop
self._echo = echo
def _run_operation(self, func, *args, **kwargs):
# execute func in thread pool of attached to cursor connection
if not self._conn:
raise pyodbc.OperationalError('Cursor is closed.')
future = self._conn._execute(func, *args, **kwargs)
return future
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def connection(self):
"""Cursors database connection"""
return self._conn
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if
connection is in autocommit mode; False otherwse. The default
is False.
"""
return self._conn.autocommit
@property
def rowcount(self):
"""The number of rows modified by the previous DDL statement.
This is -1 if no SQL has been executed or if the number of rows is
unknown. Note that it is not uncommon for databases to report -1
after a select statement for performance reasons. (The exact number
may not be known before the first records are returned to the
application.)
"""
return self._impl.rowcount
@property
def description(self):
"""This read-only attribute is a list of 7-item tuples, each
containing (name, type_code, display_size, internal_size, precision,
scale, null_ok).
pyodbc only provides values for name, type_code, internal_size,
and null_ok. The other values are set to None.
This attribute will be None for operations that do not return rows
or if one of the execute methods has not been called.
The type_code member is the class type used to create the Python
objects when reading rows. For example, a varchar column's type will
be str.
"""
return self._impl.description
@property
def closed(self):
"""Read only property indicates if cursor has been closed"""
return self._conn is None
@property
def arraysize(self):
"""This read/write attribute specifies the number of rows to fetch
at a time with .fetchmany() . It defaults to 1 meaning to fetch a
single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, size):
self._impl.arraysize = size
async def close(self):
"""Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the cursor.
"""
if self._conn is None:
return
await self._run_operation(self._impl.close)
self._conn = None
async def execute(self, sql, *params):
"""Executes the given operation substituting any markers with
the given parameters.
:param sql: the SQL statement to execute with optional ? parameter
markers. Note that pyodbc never modifies the SQL statement.
:param params: optional parameters for the markers in the SQL. They
can be passed in a single sequence as defined by the DB API.
For convenience, however, they can also be passed individually
"""
if self._echo:
logger.info(sql)
logger.info("%r", sql)
await self._run_operation(self._impl.execute, sql, *params)
return self
def executemany(self, sql, *params):
"""Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.
:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
"""
fut = self._run_operation(self._impl.executemany, sql, *params)
return fut
def callproc(self, procname, args=()):
raise NotImplementedError
async def setinputsizes(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
async def setoutputsize(self, *args, **kwargs):
"""Does nothing, required by DB API."""
return None
def fetchone(self):
"""Returns the next row or None when no more data is available.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
"""
fut = self._run_operation(self._impl.fetchone)
return fut
def fetchall(self):
"""Returns a list of all remaining rows.
Since this reads all rows into memory, it should not be used if
there are a lot of rows. Consider iterating over the rows instead.
However, it is useful for freeing up a Cursor so you can perform a
second query before processing the resulting rows.
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT statement)
"""
fut = self._run_operation(self._impl.fetchall)
return fut
def fetchmany(self, size):
"""Returns a list of remaining rows, containing no more than size
rows, used to process results in chunks. The list will be empty when
there are no more rows.
The default for cursor.arraysize is 1 which is no different than
calling fetchone().
A ProgrammingError exception is raised if no SQL has been executed
or if it did not return a result set (e.g. was not a SELECT
statement).
:param size: int, max number of rows to return
"""
fut = self._run_operation(self._impl.fetchmany, size)
return fut
def nextset(self):
"""This method will make the cursor skip to the next available
set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise,
it returns a true value and subsequent calls to the fetch methods
will return rows from the next result set.
This method is primarily used if you have stored procedures that
return multiple results.
"""
fut = self._run_operation(self._impl.nextset)
return fut
def tables(self, **kw):
"""Creates a result set of tables in the database that match the
given criteria.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
"""
fut = self._run_operation(self._impl.tables, **kw)
return fut
def columns(self, **kw):
"""Creates a results set of column names in specified tables by
executing the ODBC SQLColumns function. Each row fetched has the
following columns.
:param table: the table tname
:param catalog: the catalog name
:param schema: the schmea name
:param column: string search pattern for column names.
"""
fut = self._run_operation(self._impl.columns, **kw)
return fut
def statistics(self, catalog=None, schema=None, unique=False, quick=True):
"""Creates a results set of statistics about a single table and
the indexes associated with the table by executing SQLStatistics.
:param catalog: the catalog name
:param schema: the schmea name
:param unique: if True, only unique indexes are retured. Otherwise
all indexes are returned.
:param quick: if True, CARDINALITY and PAGES are returned only if
they are readily available from the server
"""
fut = self._run_operation(self._impl.statistics, catalog=catalog,
schema=schema, unique=unique, quick=quick)
return fut
def rowIdColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a
result set of columns that uniquely identify a row
"""
fut = self._run_operation(self._impl.rowIdColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def rowVerColumns(self, table, catalog=None, schema=None, # nopep8
nullable=True):
"""Executes SQLSpecialColumns with SQL_ROWVER which creates a
result set of columns that are automatically updated when any
value in the row is updated.
"""
fut = self._run_operation(self._impl.rowVerColumns, table,
catalog=catalog, schema=schema,
nullable=nullable)
return fut
def primaryKeys(self, table, catalog=None, schema=None): # nopep8
"""Creates a result set of column names that make up the primary key
for a table by executing the SQLPrimaryKeys function."""
fut = self._run_operation(self._impl.primaryKeys, table,
catalog=catalog, schema=schema)
return fut
def foreignKeys(self, *a, **kw): # nopep8
"""Executes the SQLForeignKeys function and creates a result set
of column names that are foreign keys in the specified table (columns
in the specified table that refer to primary keys in other tables)
or foreign keys in other tables that refer to the primary key in
the specified table.
"""
fut = self._run_operation(self._impl.foreignKeys, *a, **kw)
return fut
def getTypeInfo(self, sql_type): # nopep8
"""Executes SQLGetTypeInfo a creates a result set with information
about the specified data type or all data types supported by the
ODBC driver if not specified.
"""
fut = self._run_operation(self._impl.getTypeInfo, sql_type)
return fut
def procedureColumns(self, *a, **kw): # nopep8
fut = self._run_operation(self._impl.procedureColumns, *a, **kw)
return fut
def skip(self, count):
fut = self._run_operation(self._impl.skip, count)
return fut
def commit(self):
fut = self._run_operation(self._impl.commit)
return fut
def rollback(self):
fut = self._run_operation(self._impl.rollback)
return fut
if PY_352:
def __aiter__(self):
return self
else:
async def __aiter__(self):
return self
async def __anext__(self):
ret = await self.fetchone()
if ret is not None:
return ret
else:
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/__init__.py | dataSources | python | async def dataSources(loop=None, executor=None):
loop = loop or asyncio.get_event_loop()
sources = await loop.run_in_executor(executor, _dataSources)
return sources | Returns a dictionary mapping available DSNs to their descriptions.
:param loop: asyncio compatible event loop
:param executor: instance of custom ThreadPoolExecutor, if not supplied
default executor will be used
:return dict: mapping of dsn to driver description | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/__init__.py#L14-L24 | null | import asyncio
from pyodbc import dataSources as _dataSources
from .connection import connect, Connection
from .pool import create_pool, Pool
__version__ = '0.3.2'
__all__ = ['connect', 'Connection', 'create_pool', 'Pool', 'dataSources']
(connect, Connection, create_pool, Pool) # pyflakes
|
aio-libs/aioodbc | aioodbc/pool.py | Pool.clear | python | async def clear(self):
with (await self._cond):
while self._free:
conn = self._free.popleft()
await conn.close()
self._cond.notify() | Close all free connections in pool. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/pool.py#L77-L83 | null | class Pool(asyncio.AbstractServer):
"""Connection pool"""
def __init__(self, minsize, maxsize, echo, loop, pool_recycle, **kwargs):
if minsize < 0:
raise ValueError("minsize should be zero or greater")
if maxsize < minsize:
raise ValueError("maxsize should be not less than minsize")
self._minsize = minsize
self._loop = loop
self._conn_kwargs = kwargs
self._acquiring = 0
self._recycle = pool_recycle
self._free = collections.deque(maxlen=maxsize)
self._cond = asyncio.Condition(loop=loop)
self._used = set()
self._closing = False
self._closed = False
self._echo = echo
@property
def echo(self):
return self._echo
@property
def minsize(self):
return self._minsize
@property
def maxsize(self):
return self._free.maxlen
@property
def size(self):
return self.freesize + len(self._used) + self._acquiring
@property
def freesize(self):
return len(self._free)
@property
def closed(self):
return self._closed
def close(self):
"""Close pool.
Mark all pool connections to be closed on getting back to pool.
Closed pool doesn't allow to acquire new connections.
"""
if self._closed:
return
self._closing = True
async def wait_closed(self):
"""Wait for closing all pool's connections."""
if self._closed:
return
if not self._closing:
raise RuntimeError(".wait_closed() should be called "
"after .close()")
while self._free:
conn = self._free.popleft()
await conn.close()
with (await self._cond):
while self.size > self.freesize:
await self._cond.wait()
self._closed = True
def acquire(self):
"""Acquire free connection from the pool."""
coro = self._acquire()
return _PoolConnectionContextManager(coro, self)
async def _acquire(self):
if self._closing:
raise RuntimeError("Cannot acquire connection after closing pool")
with (await self._cond):
while True:
await self._fill_free_pool(True)
if self._free:
conn = self._free.popleft()
assert not conn.closed, conn
assert conn not in self._used, (conn, self._used)
self._used.add(conn)
return conn
else:
await self._cond.wait()
async def _fill_free_pool(self, override_min):
n, free = 0, len(self._free)
while n < free:
conn = self._free[-1]
if self._recycle > -1 \
and self._loop.time() - conn.last_usage > self._recycle:
await conn.close()
self._free.pop()
else:
self._free.rotate()
n += 1
while self.size < self.minsize:
self._acquiring += 1
try:
conn = await connect(echo=self._echo, loop=self._loop,
**self._conn_kwargs)
# raise exception if pool is closing
self._free.append(conn)
self._cond.notify()
finally:
self._acquiring -= 1
if self._free:
return
if override_min and self.size < self.maxsize:
self._acquiring += 1
try:
conn = await connect(echo=self._echo, loop=self._loop,
**self._conn_kwargs)
# raise exception if pool is closing
self._free.append(conn)
self._cond.notify()
finally:
self._acquiring -= 1
async def _wakeup(self):
with (await self._cond):
self._cond.notify()
async def release(self, conn):
"""Release free connection back to the connection pool.
"""
assert conn in self._used, (conn, self._used)
self._used.remove(conn)
if not conn.closed:
if self._closing:
await conn.close()
else:
self._free.append(conn)
await self._wakeup()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.close()
await self.wait_closed()
|
aio-libs/aioodbc | aioodbc/pool.py | Pool.release | python | async def release(self, conn):
assert conn in self._used, (conn, self._used)
self._used.remove(conn)
if not conn.closed:
if self._closing:
await conn.close()
else:
self._free.append(conn)
await self._wakeup() | Release free connection back to the connection pool. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/pool.py#L174-L184 | [
"async def _wakeup(self):\n with (await self._cond):\n self._cond.notify()\n"
] | class Pool(asyncio.AbstractServer):
"""Connection pool"""
def __init__(self, minsize, maxsize, echo, loop, pool_recycle, **kwargs):
if minsize < 0:
raise ValueError("minsize should be zero or greater")
if maxsize < minsize:
raise ValueError("maxsize should be not less than minsize")
self._minsize = minsize
self._loop = loop
self._conn_kwargs = kwargs
self._acquiring = 0
self._recycle = pool_recycle
self._free = collections.deque(maxlen=maxsize)
self._cond = asyncio.Condition(loop=loop)
self._used = set()
self._closing = False
self._closed = False
self._echo = echo
@property
def echo(self):
return self._echo
@property
def minsize(self):
return self._minsize
@property
def maxsize(self):
return self._free.maxlen
@property
def size(self):
return self.freesize + len(self._used) + self._acquiring
@property
def freesize(self):
return len(self._free)
@property
def closed(self):
return self._closed
async def clear(self):
"""Close all free connections in pool."""
with (await self._cond):
while self._free:
conn = self._free.popleft()
await conn.close()
self._cond.notify()
def close(self):
"""Close pool.
Mark all pool connections to be closed on getting back to pool.
Closed pool doesn't allow to acquire new connections.
"""
if self._closed:
return
self._closing = True
async def wait_closed(self):
"""Wait for closing all pool's connections."""
if self._closed:
return
if not self._closing:
raise RuntimeError(".wait_closed() should be called "
"after .close()")
while self._free:
conn = self._free.popleft()
await conn.close()
with (await self._cond):
while self.size > self.freesize:
await self._cond.wait()
self._closed = True
def acquire(self):
"""Acquire free connection from the pool."""
coro = self._acquire()
return _PoolConnectionContextManager(coro, self)
async def _acquire(self):
if self._closing:
raise RuntimeError("Cannot acquire connection after closing pool")
with (await self._cond):
while True:
await self._fill_free_pool(True)
if self._free:
conn = self._free.popleft()
assert not conn.closed, conn
assert conn not in self._used, (conn, self._used)
self._used.add(conn)
return conn
else:
await self._cond.wait()
async def _fill_free_pool(self, override_min):
n, free = 0, len(self._free)
while n < free:
conn = self._free[-1]
if self._recycle > -1 \
and self._loop.time() - conn.last_usage > self._recycle:
await conn.close()
self._free.pop()
else:
self._free.rotate()
n += 1
while self.size < self.minsize:
self._acquiring += 1
try:
conn = await connect(echo=self._echo, loop=self._loop,
**self._conn_kwargs)
# raise exception if pool is closing
self._free.append(conn)
self._cond.notify()
finally:
self._acquiring -= 1
if self._free:
return
if override_min and self.size < self.maxsize:
self._acquiring += 1
try:
conn = await connect(echo=self._echo, loop=self._loop,
**self._conn_kwargs)
# raise exception if pool is closing
self._free.append(conn)
self._cond.notify()
finally:
self._acquiring -= 1
async def _wakeup(self):
with (await self._cond):
self._cond.notify()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.close()
await self.wait_closed()
|
aio-libs/aioodbc | aioodbc/connection.py | connect | python | def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None,
executor=None, echo=False, after_created=None, **kwargs):
return _ContextManager(_connect(dsn=dsn, autocommit=autocommit,
ansi=ansi, timeout=timeout, loop=loop,
executor=executor, echo=echo,
after_created=after_created, **kwargs)) | Accepts an ODBC connection string and returns a new Connection object.
The connection string can be passed as the string `str`, as a list of
keywords,or a combination of the two. Any keywords except autocommit,
ansi, and timeout are simply added to the connection string.
:param autocommit bool: False or zero, the default, if True or non-zero,
the connection is put into ODBC autocommit mode and statements are
committed automatically.
:param ansi bool: By default, pyodbc first attempts to connect using
the Unicode version of SQLDriverConnectW. If the driver returns IM001
indicating it does not support the Unicode version, the ANSI version
is tried.
:param timeout int: An integer login timeout in seconds, used to set
the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is
0 which means the database's default timeout, if any, is use
:param after_created callable: support customize configuration after
connection is connected. Must be an async unary function, or leave it
as None. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L15-L40 | [
"async def _connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None,\n executor=None, echo=False, after_created=None, **kwargs):\n loop = loop or asyncio.get_event_loop()\n conn = Connection(dsn=dsn, autocommit=autocommit, ansi=ansi,\n timeout=timeout, echo=echo, loop=loop, executor=executor,\n after_created=after_created, **kwargs)\n await conn._connect()\n return conn\n"
] | import asyncio
import sys
import traceback
import warnings
from functools import partial
import pyodbc
from .cursor import Cursor
from .utils import _ContextManager
__all__ = ['connect', 'Connection']
async def _connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None,
executor=None, echo=False, after_created=None, **kwargs):
loop = loop or asyncio.get_event_loop()
conn = Connection(dsn=dsn, autocommit=autocommit, ansi=ansi,
timeout=timeout, echo=echo, loop=loop, executor=executor,
after_created=after_created, **kwargs)
await conn._connect()
return conn
class Connection:
""" Connection objects manage connections to the database.
Connections should only be created by the aioodbc.connect function.
"""
_source_traceback = None
def __init__(self, *, dsn, autocommit=False, ansi=None,
timeout=0, executor=None, echo=False, loop=None,
after_created=None, **kwargs):
self._executor = executor
self._loop = loop or asyncio.get_event_loop()
self._conn = None
self._timeout = timeout
self._last_usage = self._loop.time()
self._autocommit = autocommit
self._ansi = ansi
self._dsn = dsn
self._echo = echo
self._posthook = after_created
self._kwargs = kwargs
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def _execute(self, func, *args, **kwargs):
# execute function with args and kwargs in thread pool
func = partial(func, *args, **kwargs)
future = self._loop.run_in_executor(self._executor, func)
return future
async def _connect(self):
# create pyodbc connection
f = self._execute(pyodbc.connect, self._dsn,
autocommit=self._autocommit, ansi=self._ansi,
timeout=self._timeout,
**self._kwargs)
self._conn = await f
if self._posthook is not None:
await self._posthook(self._conn)
@property
def loop(self):
return self._loop
@property
def closed(self):
if self._conn:
return False
return True
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if the
connection is in autocommit mode; False otherwise. The default
is False
"""
return self._conn.autocommit
@property
def timeout(self):
return self._conn.timeout
@property
def last_usage(self):
return self._last_usage
@property
def echo(self):
return self._echo
async def _cursor(self):
c = await self._execute(self._conn.cursor)
self._last_usage = self._loop.time()
connection = self
return Cursor(c, connection, echo=self._echo)
def cursor(self):
return _ContextManager(self._cursor())
async def close(self):
"""Close pyodbc connection"""
if not self._conn:
return
c = await self._execute(self._conn.close)
self._conn = None
return c
def commit(self):
"""Commit any pending transaction to the database."""
fut = self._execute(self._conn.commit)
return fut
def rollback(self):
"""Causes the database to roll back to the start of any pending
transaction.
"""
fut = self._execute(self._conn.rollback)
return fut
async def execute(self, sql, *args):
"""Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement
"""
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor
def getinfo(self, type_):
"""Returns general information about the driver and data source
associated with a connection by calling SQLGetInfo and returning its
results. See Microsoft's SQLGetInfo documentation for the types of
information available.
:param type_: int, pyodbc.SQL_* constant
"""
fut = self._execute(self._conn.getinfo, type_)
return fut
def add_output_converter(self, sqltype, func):
"""Register an output converter function that will be called whenever
a value with the given SQL type is read from the database.
:param sqltype: the integer SQL type value to convert, which can
be one of the defined standard constants (pyodbc.SQL_VARCHAR)
or a database-specific value (e.g. -151 for the SQL Server 2008
geometry data type).
:param func: the converter function which will be called with a
single parameter, the value, and should return the converted
value. If the value is NULL, the parameter will be None.
Otherwise it will be a Python string.
"""
fut = self._execute(self._conn.add_output_converter, sqltype, func)
return fut
def clear_output_converters(self):
"""Remove all output converter functions added by
add_output_converter.
"""
fut = self._execute(self._conn.clear_output_converters)
return fut
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:parm value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut
def __del__(self):
if not self.closed:
# This will block the loop, please use close
# coroutine to close connection
self._conn.close()
self._conn = None
warnings.warn("Unclosed connection {!r}".format(self),
ResourceWarning)
context = {'connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/connection.py | Connection.close | python | async def close(self):
if not self._conn:
return
c = await self._execute(self._conn.close)
self._conn = None
return c | Close pyodbc connection | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L133-L139 | [
"def _execute(self, func, *args, **kwargs):\n # execute function with args and kwargs in thread pool\n func = partial(func, *args, **kwargs)\n future = self._loop.run_in_executor(self._executor, func)\n return future\n"
] | class Connection:
""" Connection objects manage connections to the database.
Connections should only be created by the aioodbc.connect function.
"""
_source_traceback = None
def __init__(self, *, dsn, autocommit=False, ansi=None,
timeout=0, executor=None, echo=False, loop=None,
after_created=None, **kwargs):
self._executor = executor
self._loop = loop or asyncio.get_event_loop()
self._conn = None
self._timeout = timeout
self._last_usage = self._loop.time()
self._autocommit = autocommit
self._ansi = ansi
self._dsn = dsn
self._echo = echo
self._posthook = after_created
self._kwargs = kwargs
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def _execute(self, func, *args, **kwargs):
# execute function with args and kwargs in thread pool
func = partial(func, *args, **kwargs)
future = self._loop.run_in_executor(self._executor, func)
return future
async def _connect(self):
# create pyodbc connection
f = self._execute(pyodbc.connect, self._dsn,
autocommit=self._autocommit, ansi=self._ansi,
timeout=self._timeout,
**self._kwargs)
self._conn = await f
if self._posthook is not None:
await self._posthook(self._conn)
@property
def loop(self):
return self._loop
@property
def closed(self):
if self._conn:
return False
return True
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if the
connection is in autocommit mode; False otherwise. The default
is False
"""
return self._conn.autocommit
@property
def timeout(self):
return self._conn.timeout
@property
def last_usage(self):
return self._last_usage
@property
def echo(self):
return self._echo
async def _cursor(self):
c = await self._execute(self._conn.cursor)
self._last_usage = self._loop.time()
connection = self
return Cursor(c, connection, echo=self._echo)
def cursor(self):
return _ContextManager(self._cursor())
def commit(self):
"""Commit any pending transaction to the database."""
fut = self._execute(self._conn.commit)
return fut
def rollback(self):
"""Causes the database to roll back to the start of any pending
transaction.
"""
fut = self._execute(self._conn.rollback)
return fut
async def execute(self, sql, *args):
"""Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement
"""
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor
def getinfo(self, type_):
"""Returns general information about the driver and data source
associated with a connection by calling SQLGetInfo and returning its
results. See Microsoft's SQLGetInfo documentation for the types of
information available.
:param type_: int, pyodbc.SQL_* constant
"""
fut = self._execute(self._conn.getinfo, type_)
return fut
def add_output_converter(self, sqltype, func):
"""Register an output converter function that will be called whenever
a value with the given SQL type is read from the database.
:param sqltype: the integer SQL type value to convert, which can
be one of the defined standard constants (pyodbc.SQL_VARCHAR)
or a database-specific value (e.g. -151 for the SQL Server 2008
geometry data type).
:param func: the converter function which will be called with a
single parameter, the value, and should return the converted
value. If the value is NULL, the parameter will be None.
Otherwise it will be a Python string.
"""
fut = self._execute(self._conn.add_output_converter, sqltype, func)
return fut
def clear_output_converters(self):
"""Remove all output converter functions added by
add_output_converter.
"""
fut = self._execute(self._conn.clear_output_converters)
return fut
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:parm value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut
def __del__(self):
if not self.closed:
# This will block the loop, please use close
# coroutine to close connection
self._conn.close()
self._conn = None
warnings.warn("Unclosed connection {!r}".format(self),
ResourceWarning)
context = {'connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/connection.py | Connection.execute | python | async def execute(self, sql, *args):
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor | Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L153-L167 | [
"def _execute(self, func, *args, **kwargs):\n # execute function with args and kwargs in thread pool\n func = partial(func, *args, **kwargs)\n future = self._loop.run_in_executor(self._executor, func)\n return future\n"
] | class Connection:
""" Connection objects manage connections to the database.
Connections should only be created by the aioodbc.connect function.
"""
_source_traceback = None
def __init__(self, *, dsn, autocommit=False, ansi=None,
timeout=0, executor=None, echo=False, loop=None,
after_created=None, **kwargs):
self._executor = executor
self._loop = loop or asyncio.get_event_loop()
self._conn = None
self._timeout = timeout
self._last_usage = self._loop.time()
self._autocommit = autocommit
self._ansi = ansi
self._dsn = dsn
self._echo = echo
self._posthook = after_created
self._kwargs = kwargs
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def _execute(self, func, *args, **kwargs):
# execute function with args and kwargs in thread pool
func = partial(func, *args, **kwargs)
future = self._loop.run_in_executor(self._executor, func)
return future
async def _connect(self):
# create pyodbc connection
f = self._execute(pyodbc.connect, self._dsn,
autocommit=self._autocommit, ansi=self._ansi,
timeout=self._timeout,
**self._kwargs)
self._conn = await f
if self._posthook is not None:
await self._posthook(self._conn)
@property
def loop(self):
return self._loop
@property
def closed(self):
if self._conn:
return False
return True
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if the
connection is in autocommit mode; False otherwise. The default
is False
"""
return self._conn.autocommit
@property
def timeout(self):
return self._conn.timeout
@property
def last_usage(self):
return self._last_usage
@property
def echo(self):
return self._echo
async def _cursor(self):
c = await self._execute(self._conn.cursor)
self._last_usage = self._loop.time()
connection = self
return Cursor(c, connection, echo=self._echo)
def cursor(self):
return _ContextManager(self._cursor())
async def close(self):
"""Close pyodbc connection"""
if not self._conn:
return
c = await self._execute(self._conn.close)
self._conn = None
return c
def commit(self):
"""Commit any pending transaction to the database."""
fut = self._execute(self._conn.commit)
return fut
def rollback(self):
"""Causes the database to roll back to the start of any pending
transaction.
"""
fut = self._execute(self._conn.rollback)
return fut
def getinfo(self, type_):
"""Returns general information about the driver and data source
associated with a connection by calling SQLGetInfo and returning its
results. See Microsoft's SQLGetInfo documentation for the types of
information available.
:param type_: int, pyodbc.SQL_* constant
"""
fut = self._execute(self._conn.getinfo, type_)
return fut
def add_output_converter(self, sqltype, func):
"""Register an output converter function that will be called whenever
a value with the given SQL type is read from the database.
:param sqltype: the integer SQL type value to convert, which can
be one of the defined standard constants (pyodbc.SQL_VARCHAR)
or a database-specific value (e.g. -151 for the SQL Server 2008
geometry data type).
:param func: the converter function which will be called with a
single parameter, the value, and should return the converted
value. If the value is NULL, the parameter will be None.
Otherwise it will be a Python string.
"""
fut = self._execute(self._conn.add_output_converter, sqltype, func)
return fut
def clear_output_converters(self):
"""Remove all output converter functions added by
add_output_converter.
"""
fut = self._execute(self._conn.clear_output_converters)
return fut
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:parm value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut
def __del__(self):
if not self.closed:
# This will block the loop, please use close
# coroutine to close connection
self._conn.close()
self._conn = None
warnings.warn("Unclosed connection {!r}".format(self),
ResourceWarning)
context = {'connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/connection.py | Connection.getinfo | python | def getinfo(self, type_):
fut = self._execute(self._conn.getinfo, type_)
return fut | Returns general information about the driver and data source
associated with a connection by calling SQLGetInfo and returning its
results. See Microsoft's SQLGetInfo documentation for the types of
information available.
:param type_: int, pyodbc.SQL_* constant | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L169-L178 | [
"def _execute(self, func, *args, **kwargs):\n # execute function with args and kwargs in thread pool\n func = partial(func, *args, **kwargs)\n future = self._loop.run_in_executor(self._executor, func)\n return future\n"
] | class Connection:
""" Connection objects manage connections to the database.
Connections should only be created by the aioodbc.connect function.
"""
_source_traceback = None
def __init__(self, *, dsn, autocommit=False, ansi=None,
timeout=0, executor=None, echo=False, loop=None,
after_created=None, **kwargs):
self._executor = executor
self._loop = loop or asyncio.get_event_loop()
self._conn = None
self._timeout = timeout
self._last_usage = self._loop.time()
self._autocommit = autocommit
self._ansi = ansi
self._dsn = dsn
self._echo = echo
self._posthook = after_created
self._kwargs = kwargs
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def _execute(self, func, *args, **kwargs):
# execute function with args and kwargs in thread pool
func = partial(func, *args, **kwargs)
future = self._loop.run_in_executor(self._executor, func)
return future
async def _connect(self):
# create pyodbc connection
f = self._execute(pyodbc.connect, self._dsn,
autocommit=self._autocommit, ansi=self._ansi,
timeout=self._timeout,
**self._kwargs)
self._conn = await f
if self._posthook is not None:
await self._posthook(self._conn)
@property
def loop(self):
return self._loop
@property
def closed(self):
if self._conn:
return False
return True
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if the
connection is in autocommit mode; False otherwise. The default
is False
"""
return self._conn.autocommit
@property
def timeout(self):
return self._conn.timeout
@property
def last_usage(self):
return self._last_usage
@property
def echo(self):
return self._echo
async def _cursor(self):
c = await self._execute(self._conn.cursor)
self._last_usage = self._loop.time()
connection = self
return Cursor(c, connection, echo=self._echo)
def cursor(self):
return _ContextManager(self._cursor())
async def close(self):
"""Close pyodbc connection"""
if not self._conn:
return
c = await self._execute(self._conn.close)
self._conn = None
return c
def commit(self):
"""Commit any pending transaction to the database."""
fut = self._execute(self._conn.commit)
return fut
def rollback(self):
"""Causes the database to roll back to the start of any pending
transaction.
"""
fut = self._execute(self._conn.rollback)
return fut
async def execute(self, sql, *args):
"""Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement
"""
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor
def add_output_converter(self, sqltype, func):
"""Register an output converter function that will be called whenever
a value with the given SQL type is read from the database.
:param sqltype: the integer SQL type value to convert, which can
be one of the defined standard constants (pyodbc.SQL_VARCHAR)
or a database-specific value (e.g. -151 for the SQL Server 2008
geometry data type).
:param func: the converter function which will be called with a
single parameter, the value, and should return the converted
value. If the value is NULL, the parameter will be None.
Otherwise it will be a Python string.
"""
fut = self._execute(self._conn.add_output_converter, sqltype, func)
return fut
def clear_output_converters(self):
"""Remove all output converter functions added by
add_output_converter.
"""
fut = self._execute(self._conn.clear_output_converters)
return fut
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:parm value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut
def __del__(self):
if not self.closed:
# This will block the loop, please use close
# coroutine to close connection
self._conn.close()
self._conn = None
warnings.warn("Unclosed connection {!r}".format(self),
ResourceWarning)
context = {'connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/connection.py | Connection.add_output_converter | python | def add_output_converter(self, sqltype, func):
fut = self._execute(self._conn.add_output_converter, sqltype, func)
return fut | Register an output converter function that will be called whenever
a value with the given SQL type is read from the database.
:param sqltype: the integer SQL type value to convert, which can
be one of the defined standard constants (pyodbc.SQL_VARCHAR)
or a database-specific value (e.g. -151 for the SQL Server 2008
geometry data type).
:param func: the converter function which will be called with a
single parameter, the value, and should return the converted
value. If the value is NULL, the parameter will be None.
Otherwise it will be a Python string. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L180-L194 | [
"def _execute(self, func, *args, **kwargs):\n # execute function with args and kwargs in thread pool\n func = partial(func, *args, **kwargs)\n future = self._loop.run_in_executor(self._executor, func)\n return future\n"
] | class Connection:
""" Connection objects manage connections to the database.
Connections should only be created by the aioodbc.connect function.
"""
_source_traceback = None
def __init__(self, *, dsn, autocommit=False, ansi=None,
timeout=0, executor=None, echo=False, loop=None,
after_created=None, **kwargs):
self._executor = executor
self._loop = loop or asyncio.get_event_loop()
self._conn = None
self._timeout = timeout
self._last_usage = self._loop.time()
self._autocommit = autocommit
self._ansi = ansi
self._dsn = dsn
self._echo = echo
self._posthook = after_created
self._kwargs = kwargs
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def _execute(self, func, *args, **kwargs):
# execute function with args and kwargs in thread pool
func = partial(func, *args, **kwargs)
future = self._loop.run_in_executor(self._executor, func)
return future
async def _connect(self):
# create pyodbc connection
f = self._execute(pyodbc.connect, self._dsn,
autocommit=self._autocommit, ansi=self._ansi,
timeout=self._timeout,
**self._kwargs)
self._conn = await f
if self._posthook is not None:
await self._posthook(self._conn)
@property
def loop(self):
return self._loop
@property
def closed(self):
if self._conn:
return False
return True
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if the
connection is in autocommit mode; False otherwise. The default
is False
"""
return self._conn.autocommit
@property
def timeout(self):
return self._conn.timeout
@property
def last_usage(self):
return self._last_usage
@property
def echo(self):
return self._echo
async def _cursor(self):
c = await self._execute(self._conn.cursor)
self._last_usage = self._loop.time()
connection = self
return Cursor(c, connection, echo=self._echo)
def cursor(self):
return _ContextManager(self._cursor())
async def close(self):
"""Close pyodbc connection"""
if not self._conn:
return
c = await self._execute(self._conn.close)
self._conn = None
return c
def commit(self):
"""Commit any pending transaction to the database."""
fut = self._execute(self._conn.commit)
return fut
def rollback(self):
"""Causes the database to roll back to the start of any pending
transaction.
"""
fut = self._execute(self._conn.rollback)
return fut
async def execute(self, sql, *args):
"""Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement
"""
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor
def getinfo(self, type_):
"""Returns general information about the driver and data source
associated with a connection by calling SQLGetInfo and returning its
results. See Microsoft's SQLGetInfo documentation for the types of
information available.
:param type_: int, pyodbc.SQL_* constant
"""
fut = self._execute(self._conn.getinfo, type_)
return fut
def clear_output_converters(self):
"""Remove all output converter functions added by
add_output_converter.
"""
fut = self._execute(self._conn.clear_output_converters)
return fut
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:parm value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut
def __del__(self):
if not self.closed:
# This will block the loop, please use close
# coroutine to close connection
self._conn.close()
self._conn = None
warnings.warn("Unclosed connection {!r}".format(self),
ResourceWarning)
context = {'connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
aio-libs/aioodbc | aioodbc/connection.py | Connection.set_attr | python | def set_attr(self, attr_id, value):
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut | Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:parm value: the connection attribute value to set. At this time
only integer values are supported. | train | https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L203-L212 | [
"def _execute(self, func, *args, **kwargs):\n # execute function with args and kwargs in thread pool\n func = partial(func, *args, **kwargs)\n future = self._loop.run_in_executor(self._executor, func)\n return future\n"
] | class Connection:
""" Connection objects manage connections to the database.
Connections should only be created by the aioodbc.connect function.
"""
_source_traceback = None
def __init__(self, *, dsn, autocommit=False, ansi=None,
timeout=0, executor=None, echo=False, loop=None,
after_created=None, **kwargs):
self._executor = executor
self._loop = loop or asyncio.get_event_loop()
self._conn = None
self._timeout = timeout
self._last_usage = self._loop.time()
self._autocommit = autocommit
self._ansi = ansi
self._dsn = dsn
self._echo = echo
self._posthook = after_created
self._kwargs = kwargs
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def _execute(self, func, *args, **kwargs):
# execute function with args and kwargs in thread pool
func = partial(func, *args, **kwargs)
future = self._loop.run_in_executor(self._executor, func)
return future
async def _connect(self):
# create pyodbc connection
f = self._execute(pyodbc.connect, self._dsn,
autocommit=self._autocommit, ansi=self._ansi,
timeout=self._timeout,
**self._kwargs)
self._conn = await f
if self._posthook is not None:
await self._posthook(self._conn)
@property
def loop(self):
return self._loop
@property
def closed(self):
if self._conn:
return False
return True
@property
def autocommit(self):
"""Show autocommit mode for current database session. True if the
connection is in autocommit mode; False otherwise. The default
is False
"""
return self._conn.autocommit
@property
def timeout(self):
return self._conn.timeout
@property
def last_usage(self):
return self._last_usage
@property
def echo(self):
return self._echo
async def _cursor(self):
c = await self._execute(self._conn.cursor)
self._last_usage = self._loop.time()
connection = self
return Cursor(c, connection, echo=self._echo)
def cursor(self):
return _ContextManager(self._cursor())
async def close(self):
"""Close pyodbc connection"""
if not self._conn:
return
c = await self._execute(self._conn.close)
self._conn = None
return c
def commit(self):
"""Commit any pending transaction to the database."""
fut = self._execute(self._conn.commit)
return fut
def rollback(self):
"""Causes the database to roll back to the start of any pending
transaction.
"""
fut = self._execute(self._conn.rollback)
return fut
async def execute(self, sql, *args):
"""Create a new Cursor object, call its execute method, and return it.
See Cursor.execute for more details.This is a convenience method
that is not part of the DB API. Since a new Cursor is allocated
by each call, this should not be used if more than one SQL
statement needs to be executed.
:param sql: str, formated sql statement
:param args: tuple, arguments for construction of sql statement
"""
_cursor = await self._execute(self._conn.execute, sql, *args)
connection = self
cursor = Cursor(_cursor, connection, echo=self._echo)
return cursor
def getinfo(self, type_):
"""Returns general information about the driver and data source
associated with a connection by calling SQLGetInfo and returning its
results. See Microsoft's SQLGetInfo documentation for the types of
information available.
:param type_: int, pyodbc.SQL_* constant
"""
fut = self._execute(self._conn.getinfo, type_)
return fut
def add_output_converter(self, sqltype, func):
"""Register an output converter function that will be called whenever
a value with the given SQL type is read from the database.
:param sqltype: the integer SQL type value to convert, which can
be one of the defined standard constants (pyodbc.SQL_VARCHAR)
or a database-specific value (e.g. -151 for the SQL Server 2008
geometry data type).
:param func: the converter function which will be called with a
single parameter, the value, and should return the converted
value. If the value is NULL, the parameter will be None.
Otherwise it will be a Python string.
"""
fut = self._execute(self._conn.add_output_converter, sqltype, func)
return fut
def clear_output_converters(self):
"""Remove all output converter functions added by
add_output_converter.
"""
fut = self._execute(self._conn.clear_output_converters)
return fut
def __del__(self):
if not self.closed:
# This will block the loop, please use close
# coroutine to close connection
self._conn.close()
self._conn = None
warnings.warn("Unclosed connection {!r}".format(self),
ResourceWarning)
context = {'connection': self,
'message': 'Unclosed connection'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
return
|
RedisJSON/rejson-py | rejson/client.py | bulk_of_jsons | python | def bulk_of_jsons(d):
"Replace serialized JSON values with objects in a bulk array response (list)"
def _f(b):
for index, item in enumerate(b):
if item is not None:
b[index] = d(item)
return b
return _f | Replace serialized JSON values with objects in a bulk array response (list) | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L17-L24 | null | import six
import json
from redis import StrictRedis
from redis.client import Pipeline
from redis._compat import (long, nativestr)
from .path import Path
def str_path(p):
"Returns the string representation of a path if it is of class Path"
if isinstance(p, Path):
return p.strPath
else:
return p
class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
class Pipeline(Pipeline, Client):
"Pipeline for ReJSONClient"
|
RedisJSON/rejson-py | rejson/client.py | Client.setEncoder | python | def setEncoder(self, encoder):
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode | Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L78-L87 | null | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.setDecoder | python | def setDecoder(self, decoder):
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode | Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L89-L98 | null | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsondel | python | def jsondel(self, name, path=Path.rootPath()):
return self.execute_command('JSON.DEL', name, str_path(path)) | Deletes the JSON value stored at key ``name`` under ``path`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L100-L104 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonget | python | def jsonget(self, name, *args):
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None | Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L106-L123 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n",
"def rootPath():\n \"Returns the root path's string representation\"\n return '.'\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonmget | python | def jsonmget(self, path, *args):
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces) | Gets the objects stored as a JSON values under ``path`` from
keys ``args`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L125-L133 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonset | python | def jsonset(self, name, path, obj, nx=False, xx=False):
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces) | Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L135-L151 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsontype | python | def jsontype(self, name, path=Path.rootPath()):
return self.execute_command('JSON.TYPE', name, str_path(path)) | Gets the type of the JSON value under ``path`` from key ``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L153-L157 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonnumincrby | python | def jsonnumincrby(self, name, path, number):
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number)) | Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L159-L164 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonnummultby | python | def jsonnummultby(self, name, path, number):
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number)) | Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L166-L171 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonstrappend | python | def jsonstrappend(self, name, string, path=Path.rootPath()):
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string)) | Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L173-L178 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
    """Append every object in ``args`` to the JSON array under ``path``
    at key ``name``; returns the array's new length.
    """
    # Each value is serialized with the client's encoder before sending.
    command_args = [name, str_path(path)]
    command_args.extend(self._encode(obj) for obj in args)
    return self.execute_command('JSON.ARRAPPEND', *command_args)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
    """Return the index of ``scalar`` in the JSON array under ``path`` at
    key ``name``.

    The search window can be narrowed with the inclusive ``start`` and
    exclusive ``stop`` indices.
    """
    where = str_path(path)
    needle = self._encode(scalar)
    return self.execute_command('JSON.ARRINDEX', name, where, needle,
                                start, stop)
def jsonarrinsert(self, name, path, index, *args):
    """Insert the objects ``args`` at position ``index`` into the JSON
    array under ``path`` at key ``name``; returns the new length.
    """
    command_args = [name, str_path(path), index]
    command_args.extend(self._encode(obj) for obj in args)
    return self.execute_command('JSON.ARRINSERT', *command_args)
def jsonarrlen(self, name, path=Path.rootPath()):
    """Return the length of the JSON array under ``path`` at key
    ``name``.
    """
    target = str_path(path)
    return self.execute_command('JSON.ARRLEN', name, target)
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
    """Remove and return the element at ``index`` from the JSON array
    under ``path`` at key ``name`` (defaults to the last element).
    """
    target = str_path(path)
    return self.execute_command('JSON.ARRPOP', name, target, index)
def jsonarrtrim(self, name, path, start, stop):
    """Trim the JSON array under ``path`` at key ``name`` to the
    inclusive ``start``..``stop`` range; returns the new length.
    """
    target = str_path(path)
    return self.execute_command('JSON.ARRTRIM', name, target, start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
    """Return the key names of the JSON object under ``path`` at key
    ``name``.
    """
    target = str_path(path)
    return self.execute_command('JSON.OBJKEYS', name, target)
def jsonobjlen(self, name, path=Path.rootPath()):
    """Return the number of keys in the JSON object under ``path`` at
    key ``name``.
    """
    target = str_path(path)
    return self.execute_command('JSON.OBJLEN', name, target)
def pipeline(self, transaction=True, shard_hint=None):
    """Create a pipeline that queues commands for batched execution.

    ``transaction`` - when True, the queued commands execute atomically.
    Beyond atomicity, pipelining cuts the request/response round-trips
    between client and server.

    Overridden so the returned pipeline shares this client's JSON
    encoder and decoder.
    """
    pipe = Pipeline(
        connection_pool=self.connection_pool,
        response_callbacks=self.response_callbacks,
        transaction=transaction,
        shard_hint=shard_hint)
    # Propagate the custom (de)serializers so JSON.* replies decode the
    # same way they would on the parent client.
    pipe.setEncoder(self._encoder)
    pipe.setDecoder(self._decoder)
    return pipe
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonstrlen | python | def jsonstrlen(self, name, path=Path.rootPath()):
return self.execute_command('JSON.STRLEN', name, str_path(path)) | Returns the length of the string JSON value under ``path`` at key
``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L180-L185 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonarrappend | python | def jsonarrappend(self, name, path=Path.rootPath(), *args):
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces) | Appends the objects ``args`` to the array under the ``path` in key
``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L187-L195 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonarrindex | python | def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop) | Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices. | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L197-L203 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonarrinsert | python | def jsonarrinsert(self, name, path, index, *args):
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces) | Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L205-L213 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonarrlen | python | def jsonarrlen(self, name, path=Path.rootPath()):
return self.execute_command('JSON.ARRLEN', name, str_path(path)) | Returns the length of the array JSON value under ``path`` at key
``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L215-L220 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonarrpop | python | def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
return self.execute_command('JSON.ARRPOP', name, str_path(path), index) | Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L222-L227 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonarrtrim | python | def jsonarrtrim(self, name, path, start, stop):
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop) | Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L229-L234 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonobjkeys | python | def jsonobjkeys(self, name, path=Path.rootPath()):
return self.execute_command('JSON.OBJKEYS', name, str_path(path)) | Returns the key names in the dictionary JSON value under ``path`` at key
``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L236-L241 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.jsonobjlen | python | def jsonobjlen(self, name, path=Path.rootPath()):
return self.execute_command('JSON.OBJLEN', name, str_path(path)) | Returns the length of the dictionary JSON value under ``path`` at key
``name`` | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L243-L248 | [
"def str_path(p):\n \"Returns the string representation of a path if it is of class Path\"\n if isinstance(p, Path):\n return p.strPath\n else:\n return p\n"
] | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline.
"""
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p
|
RedisJSON/rejson-py | rejson/client.py | Client.pipeline | python | def pipeline(self, transaction=True, shard_hint=None):
p = Pipeline(
connection_pool=self.connection_pool,
response_callbacks=self.response_callbacks,
transaction=transaction,
shard_hint=shard_hint)
p.setEncoder(self._encoder)
p.setDecoder(self._decoder)
return p | Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
Overridden in order to provide the right client through the pipeline. | train | https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L250-L267 | null | class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
commmands (prefixed with "json").
The client performs on-the-fly serialization/deserialization of objects
to/from JSON, and provides the ability to use a custom encoder/decoder.
"""
MODULE_INFO = {
'name': 'ReJSON',
'ver': 1
}
_encoder = None
_encode = None
_decoder = None
_decode = None
def __init__(self, encoder=None, decoder=None, *args, **kwargs):
"""
Creates a new ReJSON client.
``encoder`` should be an instance of a ``json.JSONEncoder`` class
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
self.setEncoder(encoder)
self.setDecoder(decoder)
StrictRedis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
'JSON.DEL': long,
'JSON.GET': self._decode,
'JSON.MGET': bulk_of_jsons(self._decode),
'JSON.SET': lambda r: r and nativestr(r) == 'OK',
'JSON.NUMINCRBY': self._decode,
'JSON.NUMMULTBY': self._decode,
'JSON.STRAPPEND': long,
'JSON.STRLEN': long,
'JSON.ARRAPPEND': long,
'JSON.ARRINDEX': long,
'JSON.ARRINSERT': long,
'JSON.ARRLEN': long,
'JSON.ARRPOP': self._decode,
'JSON.ARRTRIM': long,
'JSON.OBJLEN': long,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
def jsonnumincrby(self, name, path, number):
"""
Increments the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``
"""
return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
def jsonnummultby(self, name, path, number):
"""
Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``
"""
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
|
btel/svg_utils | src/svgutils/transform.py | fromfile | python | def fromfile(fname):
fig = SVGFigure()
with open(fname) as fid:
svg_file = etree.parse(fid)
fig.root = svg_file.getroot()
return fig | Open SVG figure from file.
Parameters
----------
fname : str
name of the SVG file
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the file content | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L294-L312 | null | from lxml import etree
from copy import deepcopy
import codecs
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
SVG_NAMESPACE = "http://www.w3.org/2000/svg"
XLINK_NAMESPACE = "http://www.w3.org/1999/xlink"
SVG = "{%s}" % SVG_NAMESPACE
XLINK = "{%s}" % XLINK_NAMESPACE
NSMAP = {None: SVG_NAMESPACE,
'xlink': XLINK_NAMESPACE}
class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
class TextElement(FigureElement):
"""Text element.
Corresponds to SVG ``<text>`` tag."""
def __init__(self, x, y, text, size=8, font="Verdana",
weight="normal", letterspacing=0, anchor='start',
color='black'):
txt = etree.Element(SVG+"text", {"x": str(x), "y": str(y),
"font-size": str(size),
"font-family": font,
"font-weight": weight,
"letter-spacing": str(letterspacing),
"text-anchor": str(anchor),
"fill": str(color)})
txt.text = text
FigureElement.__init__(self, txt)
class ImageElement(FigureElement):
"""Inline image element.
Correspoonds to SVG ``<image>`` tag. Image data encoded as base64 string.
"""
def __init__(self, stream, width, height, format='png'):
base64str = codecs.encode(stream.read(), 'base64').rstrip()
uri = "data:image/{};base64,{}".format(format,
base64str.decode('ascii'))
attrs = {
'width': str(width),
'height': str(height),
XLINK+'href': uri
}
img = etree.Element(SVG+"image", attrs)
FigureElement.__init__(self, img)
class LineElement(FigureElement):
"""Line element.
Corresponds to SVG ``<path>`` tag. It handles only piecewise
straight segments
"""
def __init__(self, points, width=1, color='black'):
linedata = "M{} {} ".format(*points[0])
linedata += " ".join(map(lambda x: "L{} {}".format(*x), points[1:]))
line = etree.Element(SVG+"path",
{"d": linedata,
"stroke-width": str(width),
"stroke": color})
FigureElement.__init__(self, line)
class GroupElement(FigureElement):
"""Group element.
Container for other elements. Corresponds to SVG ``<g>`` tag.
"""
def __init__(self, element_list, attrib=None):
new_group = etree.Element(SVG+"g", attrib=attrib)
for e in element_list:
if isinstance(e, FigureElement):
new_group.append(e.root)
else:
new_group.append(e)
self.root = new_group
class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
def fromstring(text):
"""Create a SVG figure from a string.
Parameters
----------
text : str
string representing the SVG content. Must be valid SVG.
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
"""
fig = SVGFigure()
svg = etree.fromstring(text.encode())
fig.root = svg
return fig
def from_mpl(fig, savefig_kw=None):
"""Create a SVG figure from a ``matplotlib`` figure.
Parameters
----------
fig : matplotlib.Figure instance
savefig_kw : dict
keyword arguments to be passed to matplotlib's
`savefig`
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
Examples
--------
If you want to overlay the figure on another SVG, you may want to pass
the `transparent` option:
>>> from svgutils import transform
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> line, = plt.plot([1,2])
>>> svgfig = transform.from_mpl(fig,
... savefig_kw=dict(transparent=True))
>>> svgfig.getroot()
<svgutils.transform.GroupElement object at ...>
"""
fid = StringIO()
if savefig_kw is None:
savefig_kw = {}
try:
fig.savefig(fid, format='svg', **savefig_kw)
except ValueError:
raise(ValueError, "No matplotlib SVG backend")
fid.seek(0)
fig = fromstring(fid.read())
# workaround mpl units bug
w, h = fig.get_size()
fig.set_size((w.replace('pt', ''), h.replace('pt', '')))
return fig
|
btel/svg_utils | src/svgutils/transform.py | fromstring | python | def fromstring(text):
fig = SVGFigure()
svg = etree.fromstring(text.encode())
fig.root = svg
return fig | Create a SVG figure from a string.
Parameters
----------
text : str
string representing the SVG content. Must be valid SVG.
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L315-L334 | null | from lxml import etree
from copy import deepcopy
import codecs
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
SVG_NAMESPACE = "http://www.w3.org/2000/svg"
XLINK_NAMESPACE = "http://www.w3.org/1999/xlink"
SVG = "{%s}" % SVG_NAMESPACE
XLINK = "{%s}" % XLINK_NAMESPACE
NSMAP = {None: SVG_NAMESPACE,
'xlink': XLINK_NAMESPACE}
class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
class TextElement(FigureElement):
"""Text element.
Corresponds to SVG ``<text>`` tag."""
def __init__(self, x, y, text, size=8, font="Verdana",
weight="normal", letterspacing=0, anchor='start',
color='black'):
txt = etree.Element(SVG+"text", {"x": str(x), "y": str(y),
"font-size": str(size),
"font-family": font,
"font-weight": weight,
"letter-spacing": str(letterspacing),
"text-anchor": str(anchor),
"fill": str(color)})
txt.text = text
FigureElement.__init__(self, txt)
class ImageElement(FigureElement):
"""Inline image element.
Correspoonds to SVG ``<image>`` tag. Image data encoded as base64 string.
"""
def __init__(self, stream, width, height, format='png'):
base64str = codecs.encode(stream.read(), 'base64').rstrip()
uri = "data:image/{};base64,{}".format(format,
base64str.decode('ascii'))
attrs = {
'width': str(width),
'height': str(height),
XLINK+'href': uri
}
img = etree.Element(SVG+"image", attrs)
FigureElement.__init__(self, img)
class LineElement(FigureElement):
"""Line element.
Corresponds to SVG ``<path>`` tag. It handles only piecewise
straight segments
"""
def __init__(self, points, width=1, color='black'):
linedata = "M{} {} ".format(*points[0])
linedata += " ".join(map(lambda x: "L{} {}".format(*x), points[1:]))
line = etree.Element(SVG+"path",
{"d": linedata,
"stroke-width": str(width),
"stroke": color})
FigureElement.__init__(self, line)
class GroupElement(FigureElement):
"""Group element.
Container for other elements. Corresponds to SVG ``<g>`` tag.
"""
def __init__(self, element_list, attrib=None):
new_group = etree.Element(SVG+"g", attrib=attrib)
for e in element_list:
if isinstance(e, FigureElement):
new_group.append(e.root)
else:
new_group.append(e)
self.root = new_group
class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
def fromfile(fname):
"""Open SVG figure from file.
Parameters
----------
fname : str
name of the SVG file
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the file content
"""
fig = SVGFigure()
with open(fname) as fid:
svg_file = etree.parse(fid)
fig.root = svg_file.getroot()
return fig
def from_mpl(fig, savefig_kw=None):
"""Create a SVG figure from a ``matplotlib`` figure.
Parameters
----------
fig : matplotlib.Figure instance
savefig_kw : dict
keyword arguments to be passed to matplotlib's
`savefig`
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
Examples
--------
If you want to overlay the figure on another SVG, you may want to pass
the `transparent` option:
>>> from svgutils import transform
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> line, = plt.plot([1,2])
>>> svgfig = transform.from_mpl(fig,
... savefig_kw=dict(transparent=True))
>>> svgfig.getroot()
<svgutils.transform.GroupElement object at ...>
"""
fid = StringIO()
if savefig_kw is None:
savefig_kw = {}
try:
fig.savefig(fid, format='svg', **savefig_kw)
except ValueError:
raise(ValueError, "No matplotlib SVG backend")
fid.seek(0)
fig = fromstring(fid.read())
# workaround mpl units bug
w, h = fig.get_size()
fig.set_size((w.replace('pt', ''), h.replace('pt', '')))
return fig
|
btel/svg_utils | src/svgutils/transform.py | from_mpl | python | def from_mpl(fig, savefig_kw=None):
fid = StringIO()
if savefig_kw is None:
savefig_kw = {}
try:
fig.savefig(fid, format='svg', **savefig_kw)
except ValueError:
raise(ValueError, "No matplotlib SVG backend")
fid.seek(0)
fig = fromstring(fid.read())
# workaround mpl units bug
w, h = fig.get_size()
fig.set_size((w.replace('pt', ''), h.replace('pt', '')))
return fig | Create a SVG figure from a ``matplotlib`` figure.
Parameters
----------
fig : matplotlib.Figure instance
savefig_kw : dict
keyword arguments to be passed to matplotlib's
`savefig`
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
Examples
--------
If you want to overlay the figure on another SVG, you may want to pass
the `transparent` option:
>>> from svgutils import transform
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> line, = plt.plot([1,2])
>>> svgfig = transform.from_mpl(fig,
... savefig_kw=dict(transparent=True))
>>> svgfig.getroot()
<svgutils.transform.GroupElement object at ...> | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L337-L390 | [
"def fromstring(text):\n \"\"\"Create a SVG figure from a string.\n\n Parameters\n ----------\n text : str\n string representing the SVG content. Must be valid SVG.\n\n Returns\n -------\n SVGFigure\n newly created :py:class:`SVGFigure` initialised with the string\n content.\n \"\"\"\n fig = SVGFigure()\n svg = etree.fromstring(text.encode())\n\n fig.root = svg\n\n return fig\n",
"def get_size(self):\n \"\"\"Get figure size\"\"\"\n return self.root.get('width'), self.root.get('height')\n",
"def set_size(self, size):\n \"\"\"Set figure size\"\"\"\n w, h = size\n self.root.set('width', w)\n self.root.set('height', h)\n"
] | from lxml import etree
from copy import deepcopy
import codecs
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
SVG_NAMESPACE = "http://www.w3.org/2000/svg"
XLINK_NAMESPACE = "http://www.w3.org/1999/xlink"
SVG = "{%s}" % SVG_NAMESPACE
XLINK = "{%s}" % XLINK_NAMESPACE
NSMAP = {None: SVG_NAMESPACE,
'xlink': XLINK_NAMESPACE}
class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
class TextElement(FigureElement):
"""Text element.
Corresponds to SVG ``<text>`` tag."""
def __init__(self, x, y, text, size=8, font="Verdana",
weight="normal", letterspacing=0, anchor='start',
color='black'):
txt = etree.Element(SVG+"text", {"x": str(x), "y": str(y),
"font-size": str(size),
"font-family": font,
"font-weight": weight,
"letter-spacing": str(letterspacing),
"text-anchor": str(anchor),
"fill": str(color)})
txt.text = text
FigureElement.__init__(self, txt)
class ImageElement(FigureElement):
"""Inline image element.
Correspoonds to SVG ``<image>`` tag. Image data encoded as base64 string.
"""
def __init__(self, stream, width, height, format='png'):
base64str = codecs.encode(stream.read(), 'base64').rstrip()
uri = "data:image/{};base64,{}".format(format,
base64str.decode('ascii'))
attrs = {
'width': str(width),
'height': str(height),
XLINK+'href': uri
}
img = etree.Element(SVG+"image", attrs)
FigureElement.__init__(self, img)
class LineElement(FigureElement):
"""Line element.
Corresponds to SVG ``<path>`` tag. It handles only piecewise
straight segments
"""
def __init__(self, points, width=1, color='black'):
linedata = "M{} {} ".format(*points[0])
linedata += " ".join(map(lambda x: "L{} {}".format(*x), points[1:]))
line = etree.Element(SVG+"path",
{"d": linedata,
"stroke-width": str(width),
"stroke": color})
FigureElement.__init__(self, line)
class GroupElement(FigureElement):
"""Group element.
Container for other elements. Corresponds to SVG ``<g>`` tag.
"""
def __init__(self, element_list, attrib=None):
new_group = etree.Element(SVG+"g", attrib=attrib)
for e in element_list:
if isinstance(e, FigureElement):
new_group.append(e.root)
else:
new_group.append(e)
self.root = new_group
class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
def fromfile(fname):
"""Open SVG figure from file.
Parameters
----------
fname : str
name of the SVG file
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the file content
"""
fig = SVGFigure()
with open(fname) as fid:
svg_file = etree.parse(fid)
fig.root = svg_file.getroot()
return fig
def fromstring(text):
"""Create a SVG figure from a string.
Parameters
----------
text : str
string representing the SVG content. Must be valid SVG.
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
"""
fig = SVGFigure()
svg = etree.fromstring(text.encode())
fig.root = svg
return fig
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.moveto | python | def moveto(self, x, y, scale=1):
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or '')) | Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L24-L36 | null | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.rotate | python | def rotate(self, angle, x=0, y=0):
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y)) | Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure) | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L38-L50 | null | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.skew | python | def skew(self, x=0, y=0):
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self | Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L52-L68 | [
"def skew_x(self, x):\n \"\"\"Skew element along the x-axis by the given angle.\n\n Parameters\n ----------\n x : float\n x-axis skew angle in degrees\n \"\"\"\n self.root.set(\"transform\", \"%s skewX(%f)\" %\n (self.root.get(\"transform\") or '', x))\n return self\n",
"def skew_y(self, y):\n \"\"\"Skew element along the y-axis by the given angle.\n\n Parameters\n ----------\n y : float\n y-axis skew angle in degrees\n \"\"\"\n self.root.set(\"transform\", \"%s skewY(%f)\" %\n (self.root.get(\"transform\") or '', y))\n return self\n"
] | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.skew_x | python | def skew_x(self, x):
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self | Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L70-L80 | null | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.skew_y | python | def skew_y(self, y):
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self | Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L82-L92 | null | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.scale_xy | python | def scale_xy(self, x=0, y=None):
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else '')) | Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L94-L109 | null | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
def find_id(self, element_id):
"""Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID."""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
|
btel/svg_utils | src/svgutils/transform.py | FigureElement.find_id | python | def find_id(self, element_id):
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0]) | Find element by its id.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
FigureElement
one of the children element with the given ID. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L122-L135 | null | class FigureElement(object):
"""Base class representing single figure element"""
def __init__(self, xml_element, defs=None):
self.root = xml_element
def moveto(self, x, y, scale=1):
"""Move and scale element.
Parameters
----------
x, y : float
displacement in x and y coordinates in user units ('px').
scale : float
scaling factor. To scale down scale < 1, scale up scale > 1.
For no scaling scale = 1.
"""
self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
(x, y, scale, self.root.get("transform") or ''))
def rotate(self, angle, x=0, y=0):
"""Rotate element by given angle around given pivot.
Parameters
----------
angle : float
rotation angle in degrees
x, y : float
pivot coordinates in user coordinate system (defaults to top-left
corner of the figure)
"""
self.root.set("transform", "%s rotate(%f %f %f)" %
(self.root.get("transform") or '', angle, x, y))
def skew(self, x=0, y=0):
"""Skew the element by x and y degrees
Convenience function which calls skew_x and skew_y
Parameters
----------
x,y : float, float
skew angle in degrees (default 0)
If an x/y angle is given as zero degrees, that transformation is omitted.
"""
if x is not 0:
self.skew_x(x)
if y is not 0:
self.skew_y(y)
return self
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self
def scale_xy(self, x=0, y=None):
"""Scale element separately across the two axes x and y.
If y is not provided, it is assumed equal to x (according to the
W3 specification).
Parameters
----------
x : float
x-axis scaling factor. To scale down x < 1, scale up x > 1.
y : (optional) float
y-axis scaling factor. To scale down y < 1, scale up y > 1.
"""
self.root.set("transform", "%s scale(%f %f)" %
(self.root.get("transform") or '',
x, y if y is not None else ''))
def __getitem__(self, i):
return FigureElement(self.root.getchildren()[i])
def copy(self):
"""Make a copy of the element"""
return deepcopy(self.root)
def tostr(self):
"""String representation of the element"""
return etree.tostring(self.root, pretty_print=True)
|
btel/svg_utils | src/svgutils/transform.py | SVGFigure.append | python | def append(self, element):
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root) | Append new element to the SVG figure | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L238-L243 | null | class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
|
btel/svg_utils | src/svgutils/transform.py | SVGFigure.getroot | python | def getroot(self):
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib) | Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L245-L260 | null | class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
|
btel/svg_utils | src/svgutils/transform.py | SVGFigure.to_str | python | def to_str(self):
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True) | Returns a string of the SVG figure. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L262-L268 | null | class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
|
btel/svg_utils | src/svgutils/transform.py | SVGFigure.save | python | def save(self, fname):
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out) | Save figure to a file | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L270-L276 | null | class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
def set_size(self, size):
"""Set figure size"""
w, h = size
self.root.set('width', w)
self.root.set('height', h)
|
btel/svg_utils | src/svgutils/transform.py | SVGFigure.set_size | python | def set_size(self, size):
w, h = size
self.root.set('width', w)
self.root.set('height', h) | Set figure size | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L287-L291 | null | class SVGFigure(object):
"""SVG Figure.
It setups standalone SVG tree. It corresponds to SVG ``<svg>`` tag.
"""
def __init__(self, width=None, height=None):
self.root = etree.Element(SVG+"svg", nsmap=NSMAP)
self.root.set("version", "1.1")
if width:
self.width = width
if height:
self.height = height
@property
def width(self):
"""Figure width"""
return self.root.get("width")
@width.setter
def width(self, value):
self.root.set('width', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
@property
def height(self):
"""Figure height"""
return self.root.get("height")
@height.setter
def height(self, value):
self.root.set('height', str(value))
self.root.set("viewBox", "0 0 %s %s" % (self.width, self.height))
def append(self, element):
"""Append new element to the SVG figure"""
try:
self.root.append(element.root)
except AttributeError:
self.root.append(GroupElement(element).root)
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
def to_str(self):
"""
Returns a string of the SVG figure.
"""
return etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
def save(self, fname):
"""Save figure to a file"""
out = etree.tostring(self.root, xml_declaration=True,
standalone=True,
pretty_print=True)
with open(fname, 'wb') as fid:
fid.write(out)
def find_id(self, element_id):
"""Find elements with the given ID"""
find = etree.XPath("//*[@id=$id]")
return FigureElement(find(self.root, id=element_id)[0])
def get_size(self):
"""Get figure size"""
return self.root.get('width'), self.root.get('height')
|
btel/svg_utils | src/svgutils/compose.py | Element.find_id | python | def find_id(self, element_id):
element = _transform.FigureElement.find_id(self, element_id)
return Element(element.root) | Find a single element with the given ID.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
found element | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L67-L80 | [
"def find_id(self, element_id):\n \"\"\"Find element by its id.\n\n Parameters\n ----------\n element_id : str\n ID of the element to find\n\n Returns\n -------\n FigureElement\n one of the children element with the given ID.\"\"\"\n find = etree.XPath(\"//*[@id=$id]\")\n return FigureElement(find(self.root, id=element_id)[0])\n"
] | class Element(_transform.FigureElement):
"""Base class for new SVG elements."""
def scale(self, factor):
"""Scale SVG element.
Parameters
----------
factor : float
The scaling factor.
Factor > 1 scales up, factor < 1 scales down.
"""
self.moveto(0, 0, factor)
return self
def move(self, x, y):
"""Move the element by x, y.
Parameters
----------
x,y : int, str
amount of horizontal and vertical shift
Notes
-----
The x, y can be given with a unit (for example, "3px", "5cm"). If no
unit is given the user unit is assumed ("px"). In SVG all units are
defined in relation to the user unit [1]_.
.. [1] W3C SVG specification:
https://www.w3.org/TR/SVG/coords.html#Units
"""
self.moveto(x, y, 1)
return self
def find_ids(self, element_ids):
"""Find elements with given IDs.
Parameters
----------
element_ids : list of strings
list of IDs to find
Returns
-------
a new `Panel` object which contains all the found elements.
"""
elements = [_transform.FigureElement.find_id(self, eid)
for eid in element_ids]
return Panel(*elements)
|
btel/svg_utils | src/svgutils/compose.py | Element.find_ids | python | def find_ids(self, element_ids):
elements = [_transform.FigureElement.find_id(self, eid)
for eid in element_ids]
return Panel(*elements) | Find elements with given IDs.
Parameters
----------
element_ids : list of strings
list of IDs to find
Returns
-------
a new `Panel` object which contains all the found elements. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L82-L96 | null | class Element(_transform.FigureElement):
"""Base class for new SVG elements."""
def scale(self, factor):
"""Scale SVG element.
Parameters
----------
factor : float
The scaling factor.
Factor > 1 scales up, factor < 1 scales down.
"""
self.moveto(0, 0, factor)
return self
def move(self, x, y):
"""Move the element by x, y.
Parameters
----------
x,y : int, str
amount of horizontal and vertical shift
Notes
-----
The x, y can be given with a unit (for example, "3px", "5cm"). If no
unit is given the user unit is assumed ("px"). In SVG all units are
defined in relation to the user unit [1]_.
.. [1] W3C SVG specification:
https://www.w3.org/TR/SVG/coords.html#Units
"""
self.moveto(x, y, 1)
return self
def find_id(self, element_id):
"""Find a single element with the given ID.
Parameters
----------
element_id : str
ID of the element to find
Returns
-------
found element
"""
element = _transform.FigureElement.find_id(self, element_id)
return Element(element.root)
|
btel/svg_utils | src/svgutils/compose.py | Figure.save | python | def save(self, fname):
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
element.save(os.path.join(CONFIG['figure.save_path'], fname)) | Save figure to SVG file.
Parameters
----------
fname : str
Full path to file. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L292-L302 | [
"def append(self, element):\n \"\"\"Append new element to the SVG figure\"\"\"\n try:\n self.root.append(element.root)\n except AttributeError:\n self.root.append(GroupElement(element).root)\n",
"def save(self, fname):\n \"\"\"Save figure to a file\"\"\"\n out = etree.tostring(self.root, xml_declaration=True,\n standalone=True,\n pretty_print=True)\n with open(fname, 'wb') as fid:\n fid.write(out)\n"
] | class Figure(Panel):
"""Main figure class.
This should be always the top class of all the generated SVG figures.
Parameters
----------
width, height : float or str
Figure size. If unit is not given, user units (px) are assumed.
"""
def __init__(self, width, height, *svgelements):
Panel.__init__(self, *svgelements)
self.width = Unit(width)
self.height = Unit(height)
def tostr(self):
"""Export SVG as a string"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr
def _repr_svg_(self):
return self.tostr().decode('ascii')
def tile(self, ncols, nrows):
"""Automatically tile the panels of the figure.
This will re-arranged all elements of the figure (first in the
hierarchy) so that they will uniformly cover the figure area.
Parameters
----------
ncols, nrows : type
The number of columns and rows to arange the elements into.
Notes
-----
ncols * nrows must be larger or equal to number of
elements, otherwise some elements will go outside the figure borders.
"""
dx = (self.width/ncols).to('px').value
dy = (self.height/nrows).to('px').value
ix, iy = 0, 0
for el in self:
el.move(dx*ix, dy*iy)
ix += 1
if ix >= ncols:
ix = 0
iy += 1
if iy > nrows:
break
return self
|
btel/svg_utils | src/svgutils/compose.py | Figure.tostr | python | def tostr(self):
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr | Export SVG as a string | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L304-L309 | [
"def append(self, element):\n \"\"\"Append new element to the SVG figure\"\"\"\n try:\n self.root.append(element.root)\n except AttributeError:\n self.root.append(GroupElement(element).root)\n",
"def to_str(self):\n \"\"\"\n Returns a string of the SVG figure.\n \"\"\"\n return etree.tostring(self.root, xml_declaration=True,\n standalone=True,\n pretty_print=True)\n"
] | class Figure(Panel):
"""Main figure class.
This should be always the top class of all the generated SVG figures.
Parameters
----------
width, height : float or str
Figure size. If unit is not given, user units (px) are assumed.
"""
def __init__(self, width, height, *svgelements):
Panel.__init__(self, *svgelements)
self.width = Unit(width)
self.height = Unit(height)
def save(self, fname):
"""Save figure to SVG file.
Parameters
----------
fname : str
Full path to file.
"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
element.save(os.path.join(CONFIG['figure.save_path'], fname))
def _repr_svg_(self):
return self.tostr().decode('ascii')
def tile(self, ncols, nrows):
"""Automatically tile the panels of the figure.
This will re-arranged all elements of the figure (first in the
hierarchy) so that they will uniformly cover the figure area.
Parameters
----------
ncols, nrows : type
The number of columns and rows to arange the elements into.
Notes
-----
ncols * nrows must be larger or equal to number of
elements, otherwise some elements will go outside the figure borders.
"""
dx = (self.width/ncols).to('px').value
dy = (self.height/nrows).to('px').value
ix, iy = 0, 0
for el in self:
el.move(dx*ix, dy*iy)
ix += 1
if ix >= ncols:
ix = 0
iy += 1
if iy > nrows:
break
return self
|
btel/svg_utils | src/svgutils/compose.py | Figure.tile | python | def tile(self, ncols, nrows):
dx = (self.width/ncols).to('px').value
dy = (self.height/nrows).to('px').value
ix, iy = 0, 0
for el in self:
el.move(dx*ix, dy*iy)
ix += 1
if ix >= ncols:
ix = 0
iy += 1
if iy > nrows:
break
return self | Automatically tile the panels of the figure.
This will re-arranged all elements of the figure (first in the
hierarchy) so that they will uniformly cover the figure area.
Parameters
----------
ncols, nrows : type
The number of columns and rows to arange the elements into.
Notes
-----
ncols * nrows must be larger or equal to number of
elements, otherwise some elements will go outside the figure borders. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L314-L342 | null | class Figure(Panel):
"""Main figure class.
This should be always the top class of all the generated SVG figures.
Parameters
----------
width, height : float or str
Figure size. If unit is not given, user units (px) are assumed.
"""
def __init__(self, width, height, *svgelements):
Panel.__init__(self, *svgelements)
self.width = Unit(width)
self.height = Unit(height)
def save(self, fname):
"""Save figure to SVG file.
Parameters
----------
fname : str
Full path to file.
"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
element.save(os.path.join(CONFIG['figure.save_path'], fname))
def tostr(self):
"""Export SVG as a string"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr
def _repr_svg_(self):
return self.tostr().decode('ascii')
|
btel/svg_utils | src/svgutils/compose.py | Unit.to | python | def to(self, unit):
u = Unit("0cm")
u.value = self.value/self.per_inch[self.unit]*self.per_inch[unit]
u.unit = unit
return u | Convert to a given unit.
Parameters
----------
unit : str
Name of the unit to convert to.
Returns
-------
u : Unit
new Unit object with the requested unit and computed value. | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L369-L385 | null | class Unit:
"""Implementaiton of SVG units and conversions between them.
Parameters
----------
measure : str
value with unit (for example, '2cm')
"""
per_inch = {'px': 90,
'cm': 2.54,
'mm': 25.4,
'pt': 1
}
def __init__(self, measure):
try:
self.value = float(measure)
self.unit = 'px'
except ValueError:
m = re.match('([0-9]+\.?[0-9]*)([a-z]+)', measure)
value, unit = m.groups()
self.value = float(value)
self.unit = unit
def __str__(self):
return "{}{}".format(self.value, self.unit)
def __repr__(self):
return "Unit({})".format(str(self))
def __mul__(self, number):
u = Unit("0cm")
u.value = self.value * number
u.unit = self.unit
return u
def __truediv__(self, number):
return self * (1./number)
def __div__(self, number):
return self * (1./number)
|
kennethreitz/requests-html | requests_html.py | user_agent | python | def user_agent(style=None) -> _UserAgent:
global useragent
if (not useragent) and style:
useragent = UserAgent()
return useragent[style] if style else DEFAULT_USER_AGENT | Returns an apparently legit user-agent, if not requested one of a specific
style. Defaults to a Chrome-style User-Agent. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L665-L673 | null | import sys
import asyncio
from urllib.parse import urlparse, urlunparse, urljoin
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
from functools import partial
from typing import Set, Union, List, MutableMapping, Optional
import pyppeteer
import requests
from pyquery import PyQuery
from fake_useragent import UserAgent
from lxml.html.clean import Cleaner
import lxml
from lxml import etree
from lxml.html import HtmlElement
from lxml.html import tostring as lxml_html_tostring
from lxml.html.soupparser import fromstring as soup_parse
from parse import search as parse_search
from parse import findall, Result
from w3lib.encoding import html_to_unicode
DEFAULT_ENCODING = 'utf-8'
DEFAULT_URL = 'https://example.org/'
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8'
DEFAULT_NEXT_SYMBOL = ['next', 'more', 'older']
cleaner = Cleaner()
cleaner.javascript = True
cleaner.style = True
useragent = None
# Typing.
_Find = Union[List['Element'], 'Element']
_XPath = Union[List[str], List['Element'], str, 'Element']
_Result = Union[List['Result'], 'Result']
_HTML = Union[str, bytes]
_BaseHTML = str
_UserAgent = str
_DefaultEncoding = str
_URL = str
_RawHTML = bytes
_Encoding = str
_LXML = HtmlElement
_Text = str
_Search = Result
_Containing = Union[str, List[str]]
_Links = Set[str]
_Attrs = MutableMapping
_Next = Union['HTML', List[str]]
_NextSymbol = List[str]
# Sanity checking.
try:
assert sys.version_info.major == 3
assert sys.version_info.minor > 5
except AssertionError:
raise RuntimeError('Requests-HTML requires Python 3.6+!')
class MaxRetries(Exception):
def __init__(self, message):
self.message = message
class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
class Element(BaseParser):
"""An element of HTML.
:param element: The element from which to base the parsing upon.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param default_encoding: Which encoding to default to.
"""
__slots__ = [
'element', 'url', 'skip_anchors', 'default_encoding', '_encoding',
'_html', '_lxml', '_pq', '_attrs', 'session'
]
def __init__(self, *, element, url: _URL, default_encoding: _DefaultEncoding = None) -> None:
super(Element, self).__init__(element=element, url=url, default_encoding=default_encoding)
self.element = element
self.tag = element.tag
self.lineno = element.sourceline
self._attrs = None
def __repr__(self) -> str:
attrs = ['{}={}'.format(attr, repr(self.attrs[attr])) for attr in self.attrs]
return "<Element {} {}>".format(repr(self.element.tag), ' '.join(attrs))
@property
def attrs(self) -> _Attrs:
"""Returns a dictionary of the attributes of the :class:`Element <Element>`
(`learn more <https://www.w3schools.com/tags/ref_attributes.asp>`_).
"""
if self._attrs is None:
self._attrs = {k: v for k, v in self.element.items()}
# Split class and rel up, as there are ussually many of them:
for attr in ['class', 'rel']:
if attr in self._attrs:
self._attrs[attr] = tuple(self._attrs[attr].split())
return self._attrs
class HTML(BaseParser):
"""An HTML document, ready for parsing.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param html: HTML from which to base the parsing upon (optional).
:param default_encoding: Which encoding to default to.
"""
def __init__(self, *, session: Union['HTMLSession', 'AsyncHTMLSession'] = None, url: str = DEFAULT_URL, html: _HTML, default_encoding: str = DEFAULT_ENCODING, async_: bool = False) -> None:
# Convert incoming unicode HTML into bytes.
if isinstance(html, str):
html = html.encode(DEFAULT_ENCODING)
pq = PyQuery(html)
super(HTML, self).__init__(
element=pq('html') or pq.wrapAll('<html></html>')('html'),
html=html,
url=url,
default_encoding=default_encoding
)
self.session = session or async_ and AsyncHTMLSession() or HTMLSession()
self.page = None
self.next_symbol = DEFAULT_NEXT_SYMBOL
def __repr__(self) -> str:
return f"<HTML url={self.url!r}>"
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
"""Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
"""
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url
def __iter__(self):
next = self
while True:
yield next
try:
next = next.next(fetch=True, next_symbol=self.next_symbol).html
except AttributeError:
break
def __next__(self):
return self.next(fetch=True, next_symbol=self.next_symbol).html
def __aiter__(self):
return self
async def __anext__(self):
while True:
url = self.next(fetch=False, next_symbol=self.next_symbol)
if not url:
break
response = await self.session.get(url)
return response.html
def add_next_symbol(self, next_symbol):
self.next_symbol.append(next_symbol)
async def _async_render(self, *, url: str, script: str = None, scrolldown, sleep: int, wait: float, reload, content: Optional[str], timeout: Union[float, int], keep_page: bool):
""" Handle page creation and js rendering. Internal use for render/arender methods. """
try:
page = await self.browser.newPage()
# Wait before rendering the page, to prevent timeouts.
await asyncio.sleep(wait)
# Load the given page (GET request, obviously.)
if reload:
await page.goto(url, options={'timeout': int(timeout * 1000)})
else:
await page.goto(f'data:text/html,{self.html}', options={'timeout': int(timeout * 1000)})
result = None
if script:
result = await page.evaluate(script)
if scrolldown:
for _ in range(scrolldown):
await page._keyboard.down('PageDown')
await asyncio.sleep(sleep)
else:
await asyncio.sleep(sleep)
if scrolldown:
await page._keyboard.up('PageDown')
# Return the content of the page, JavaScript evaluated.
content = await page.content()
if not keep_page:
await page.close()
page = None
return content, result, page
except TimeoutError:
await page.close()
page = None
return None
def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
"""Reloads the response in Chromium, and replaces HTML content
with an updated version, with JavaScript executed.
:param retries: The number of times to retry loading the page in Chromium.
:param script: JavaScript to execute upon page load (optional).
:param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
:param scrolldown: Integer, if provided, of how many times to page down.
:param sleep: Integer, if provided, of how many long to sleep after initial render.
:param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
:param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
If ``scrolldown`` is specified, the page will scrolldown the specified
number of times, after sleeping the specified amount of time
(e.g. ``scrolldown=10, sleep=1``).
If just ``sleep`` is provided, the rendering will wait *n* seconds, before
returning.
If ``script`` is specified, it will execute the provided JavaScript at
runtime. Example:
.. code-block:: python
script = \"\"\"
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}
\"\"\"
Returns the return value of the executed ``script``, if any is provided:
.. code-block:: python
>>> r.html.render(script=script)
{'width': 800, 'height': 600, 'deviceScaleFactor': 1}
Warning: the first time you run this method, it will download
Chromium into your home directory (``~/.pyppeteer``).
"""
self.browser = self.session.browser # Automatically create a event loop and browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for i in range(retries):
if not content:
try:
content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page))
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
async def arender(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
""" Async version of render. Takes same parameters. """
self.browser = await self.session.browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for _ in range(retries):
if not content:
try:
content, result, page = await self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page)
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
class HTMLResponse(requests.Response):
"""An HTML-enabled :class:`requests.Response <requests.Response>` object.
Effectively the same, but with an intelligent ``.html`` property added.
"""
def __init__(self, session: Union['HTMLSession', 'AsyncHTMLSession']) -> None:
super(HTMLResponse, self).__init__()
self._html = None # type: HTML
self.session = session
@property
def html(self) -> HTML:
if not self._html:
self._html = HTML(session=self.session, url=self.url, html=self.content, default_encoding=self.encoding)
return self._html
@classmethod
def _from_response(cls, response, session: Union['HTMLSession', 'AsyncHTMLSession']):
html_r = cls(session=session)
html_r.__dict__.update(response.__dict__)
return html_r
def _get_first_or_list(l, first=False):
if first:
try:
return l[0]
except IndexError:
return None
else:
return l
class BaseSession(requests.Session):
""" A consumable session, for cookie persistence and connection pooling,
amongst other things.
"""
def __init__(self, mock_browser : bool = True, verify : bool = True,
browser_args : list = ['--no-sandbox']):
super().__init__()
# Mock a web browser's user agent.
if mock_browser:
self.headers['User-Agent'] = user_agent()
self.hooks['response'].append(self.response_hook)
self.verify = verify
self.__browser_args = browser_args
def response_hook(self, response, **kwargs) -> HTMLResponse:
""" Change response enconding and replace it by a HTMLResponse. """
if not response.encoding:
response.encoding = DEFAULT_ENCODING
return HTMLResponse._from_response(response, self)
@property
async def browser(self):
if not hasattr(self, "_browser"):
self._browser = await pyppeteer.launch(ignoreHTTPSErrors=not(self.verify), headless=True, args=self.__browser_args)
return self._browser
class HTMLSession(BaseSession):
def __init__(self, **kwargs):
super(HTMLSession, self).__init__(**kwargs)
@property
def browser(self):
if not hasattr(self, "_browser"):
self.loop = asyncio.get_event_loop()
if self.loop.is_running():
raise RuntimeError("Cannot use HTMLSession within an existing event loop. Use AsyncHTMLSession instead.")
self._browser = self.loop.run_until_complete(super().browser)
return self._browser
def close(self):
""" If a browser was created close it first. """
if hasattr(self, "_browser"):
self.loop.run_until_complete(self._browser.close())
super().close()
class AsyncHTMLSession(BaseSession):
""" An async consumable session. """
def __init__(self, loop=None, workers=None,
mock_browser: bool = True, *args, **kwargs):
""" Set or create an event loop and a thread pool.
:param loop: Asyncio loop to use.
:param workers: Amount of threads to use for executing async calls.
If not pass it will default to the number of processors on the
machine, multiplied by 5. """
super().__init__(*args, **kwargs)
self.loop = loop or asyncio.get_event_loop()
self.thread_pool = ThreadPoolExecutor(max_workers=workers)
def request(self, *args, **kwargs):
""" Partial original request func and run it in a thread. """
func = partial(super().request, *args, **kwargs)
return self.loop.run_in_executor(self.thread_pool, func)
async def close(self):
""" If a browser was created close it first. """
if hasattr(self, "_browser"):
await self._browser.close()
super().close()
def run(self, *coros):
""" Pass in all the coroutines you want to run, it will wrap each one
in a task, run it and wait for the result. Return a list with all
results, this is returned in the same order coros are passed in. """
tasks = [
asyncio.ensure_future(coro()) for coro in coros
]
done, _ = self.loop.run_until_complete(asyncio.wait(tasks))
return [t.result() for t in done]
|
kennethreitz/requests-html | requests_html.py | BaseParser.raw_html | python | def raw_html(self) -> _RawHTML:
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding) | Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_). | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L90-L97 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.html | python | def html(self) -> _BaseHTML:
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip() | Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_). | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L100-L107 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.encoding | python | def encoding(self) -> _Encoding:
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding | The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L119-L136 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.pq | python | def pq(self) -> PyQuery:
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq | `PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L144-L151 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.lxml | python | def lxml(self) -> HtmlElement:
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml | `lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L154-L164 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.find | python | def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first) | Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L180-L234 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.xpath | python | def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first) | Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L236-L272 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.search_all | python | def search_all(self, template: str) -> _Result:
return [r for r in findall(template, self.html)] | Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L282-L288 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.links | python | def links(self) -> _Links:
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen()) | All found links on page, in as–is form. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L291-L304 | [
"def gen():\n for link in self.find('a'):\n\n try:\n href = link.attrs['href'].strip()\n if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):\n yield href\n except KeyError:\n pass\n"
] | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser._make_absolute | python | def _make_absolute(self, link):
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link | Makes a given link absolute. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L306-L325 | null | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.absolute_links | python | def absolute_links(self) -> _Links:
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen()) | All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_). | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L329-L338 | [
"def gen():\n for link in self.links:\n yield self._make_absolute(link)\n"
] | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
@property
def base_url(self) -> _URL:
"""The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url
|
kennethreitz/requests-html | requests_html.py | BaseParser.base_url | python | def base_url(self) -> _URL:
# Support for <base> tag.
base = self.find('base', first=True)
if base:
result = base.attrs.get('href', '').strip()
if result:
return result
# Parse the url to separate out the path
parsed = urlparse(self.url)._asdict()
# Remove any part of the path after the last '/'
parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
# Reconstruct the url with the modified path
parsed = (v for v in parsed.values())
url = urlunparse(parsed)
return url | The base URL for the page. Supports the ``<base>`` tag
(`learn more <https://www.w3schools.com/tags/tag_base.asp>`_). | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L341-L362 | [
"def find(self, selector: str = \"*\", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:\n \"\"\"Given a CSS Selector, returns a list of\n :class:`Element <Element>` objects or a single one.\n\n :param selector: CSS Selector to use.\n :param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.\n :param containing: If specified, only return elements that contain the provided text.\n :param first: Whether or not to return just the first result.\n :param _encoding: The encoding format.\n\n Example CSS Selectors:\n\n - ``a``\n - ``a.someClass``\n - ``a#someID``\n - ``a[target=_blank]``\n\n See W3School's `CSS Selectors Reference\n <https://www.w3schools.com/cssref/css_selectors.asp>`_\n for more details.\n\n If ``first`` is ``True``, only returns the first\n :class:`Element <Element>` found.\n \"\"\"\n\n # Convert a single containing into a list.\n if isinstance(containing, str):\n containing = [containing]\n\n encoding = _encoding or self.encoding\n elements = [\n Element(element=found, url=self.url, default_encoding=encoding)\n for found in self.pq(selector)\n ]\n\n if containing:\n elements_copy = elements.copy()\n elements = []\n\n for element in elements_copy:\n if any([c.lower() in element.full_text.lower() for c in containing]):\n elements.append(element)\n\n elements.reverse()\n\n # Sanitize the found HTML.\n if clean:\n elements_copy = elements.copy()\n elements = []\n\n for element in elements_copy:\n element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))\n elements.append(element)\n\n return _get_first_or_list(elements, first)\n"
] | class BaseParser:
"""A basic HTML/Element Parser, for Humans.
:param element: The element from which to base the parsing upon.
:param default_encoding: Which encoding to default to.
:param html: HTML from which to base the parsing upon (optional).
:param url: The URL from which the HTML originated, used for ``absolute_links``.
"""
def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
self.element = element
self.url = url
self.skip_anchors = True
self.default_encoding = default_encoding
self._encoding = None
self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
self._lxml = None
self._pq = None
@property
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
@property
def html(self) -> _BaseHTML:
"""Unicode representation of the HTML content
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self.raw_html.decode(self.encoding, errors='replace')
else:
return etree.tostring(self.element, encoding='unicode').strip()
@html.setter
def html(self, html: str) -> None:
self._html = html.encode(self.encoding)
@raw_html.setter
def raw_html(self, html: bytes) -> None:
"""Property setter for self.html."""
self._html = html
@property
def encoding(self) -> _Encoding:
"""The encoding string to be used, extracted from the HTML and
:class:`HTMLResponse <HTMLResponse>` headers.
"""
if self._encoding:
return self._encoding
# Scan meta tags for charset.
if self._html:
self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
# Fall back to requests' detected encoding if decode fails.
try:
self.raw_html.decode(self.encoding, errors='replace')
except UnicodeDecodeError:
self._encoding = self.default_encoding
return self._encoding if self._encoding else self.default_encoding
@encoding.setter
def encoding(self, enc: str) -> None:
"""Property setter for self.encoding."""
self._encoding = enc
@property
def pq(self) -> PyQuery:
"""`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
of the :class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._pq is None:
self._pq = PyQuery(self.lxml)
return self._pq
@property
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml
@property
def text(self) -> _Text:
"""The text content of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.pq.text()
@property
def full_text(self) -> _Text:
"""The full text content (including links) of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
return self.lxml.text_content()
def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
"""Given a CSS Selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: CSS Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param containing: If specified, only return elements that contain the provided text.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
Example CSS Selectors:
- ``a``
- ``a.someClass``
- ``a#someID``
- ``a[target=_blank]``
See W3School's `CSS Selectors Reference
<https://www.w3schools.com/cssref/css_selectors.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
# Convert a single containing into a list.
if isinstance(containing, str):
containing = [containing]
encoding = _encoding or self.encoding
elements = [
Element(element=found, url=self.url, default_encoding=encoding)
for found in self.pq(selector)
]
if containing:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
if any([c.lower() in element.full_text.lower() for c in containing]):
elements.append(element)
elements.reverse()
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
"""Given an XPath selector, returns a list of
:class:`Element <Element>` objects or a single one.
:param selector: XPath Selector to use.
:param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
:param first: Whether or not to return just the first result.
:param _encoding: The encoding format.
If a sub-selector is specified (e.g. ``//a/@href``), a simple
list of results is returned.
See W3School's `XPath Examples
<https://www.w3schools.com/xml/xpath_examples.asp>`_
for more details.
If ``first`` is ``True``, only returns the first
:class:`Element <Element>` found.
"""
selected = self.lxml.xpath(selector)
elements = [
Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
for selection in selected
]
# Sanitize the found HTML.
if clean:
elements_copy = elements.copy()
elements = []
for element in elements_copy:
element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
elements.append(element)
return _get_first_or_list(elements, first)
def search(self, template: str) -> Result:
"""Search the :class:`Element <Element>` for the given Parse template.
:param template: The Parse template to use.
"""
return parse_search(template, self.html)
def search_all(self, template: str) -> _Result:
"""Search the :class:`Element <Element>` (multiple times) for the given parse
template.
:param template: The Parse template to use.
"""
return [r for r in findall(template, self.html)]
@property
def links(self) -> _Links:
"""All found links on page, in as–is form."""
def gen():
for link in self.find('a'):
try:
href = link.attrs['href'].strip()
if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
yield href
except KeyError:
pass
return set(gen())
def _make_absolute(self, link):
"""Makes a given link absolute."""
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed['netloc']:
return urljoin(self.base_url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed['scheme']:
parsed['scheme'] = urlparse(self.base_url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
# Link is absolute and complete with scheme; nothing to be done here.
return link
@property
def absolute_links(self) -> _Links:
"""All found links on page, in absolute form
(`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
"""
def gen():
for link in self.links:
yield self._make_absolute(link)
return set(gen())
@property
|
kennethreitz/requests-html | requests_html.py | Element.attrs | python | def attrs(self) -> _Attrs:
if self._attrs is None:
self._attrs = {k: v for k, v in self.element.items()}
# Split class and rel up, as there are ussually many of them:
for attr in ['class', 'rel']:
if attr in self._attrs:
self._attrs[attr] = tuple(self._attrs[attr].split())
return self._attrs | Returns a dictionary of the attributes of the :class:`Element <Element>`
(`learn more <https://www.w3schools.com/tags/ref_attributes.asp>`_). | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L390-L402 | null | class Element(BaseParser):
"""An element of HTML.
:param element: The element from which to base the parsing upon.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param default_encoding: Which encoding to default to.
"""
__slots__ = [
'element', 'url', 'skip_anchors', 'default_encoding', '_encoding',
'_html', '_lxml', '_pq', '_attrs', 'session'
]
def __init__(self, *, element, url: _URL, default_encoding: _DefaultEncoding = None) -> None:
super(Element, self).__init__(element=element, url=url, default_encoding=default_encoding)
self.element = element
self.tag = element.tag
self.lineno = element.sourceline
self._attrs = None
def __repr__(self) -> str:
attrs = ['{}={}'.format(attr, repr(self.attrs[attr])) for attr in self.attrs]
return "<Element {} {}>".format(repr(self.element.tag), ' '.join(attrs))
@property
|
kennethreitz/requests-html | requests_html.py | HTML.next | python | def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url | Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L433-L472 | [
"def get_next():\n candidates = self.find('a', containing=next_symbol)\n\n for candidate in candidates:\n if candidate.attrs.get('href'):\n # Support 'next' rel (e.g. reddit).\n if 'next' in candidate.attrs.get('rel', []):\n return candidate.attrs['href']\n\n # Support 'next' in classnames.\n for _class in candidate.attrs.get('class', []):\n if 'next' in _class:\n return candidate.attrs['href']\n\n if 'page' in candidate.attrs['href']:\n return candidate.attrs['href']\n\n try:\n # Resort to the last candidate.\n return candidates[-1].attrs['href']\n except IndexError:\n return None\n"
] | class HTML(BaseParser):
"""An HTML document, ready for parsing.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param html: HTML from which to base the parsing upon (optional).
:param default_encoding: Which encoding to default to.
"""
def __init__(self, *, session: Union['HTMLSession', 'AsyncHTMLSession'] = None, url: str = DEFAULT_URL, html: _HTML, default_encoding: str = DEFAULT_ENCODING, async_: bool = False) -> None:
# Convert incoming unicode HTML into bytes.
if isinstance(html, str):
html = html.encode(DEFAULT_ENCODING)
pq = PyQuery(html)
super(HTML, self).__init__(
element=pq('html') or pq.wrapAll('<html></html>')('html'),
html=html,
url=url,
default_encoding=default_encoding
)
self.session = session or async_ and AsyncHTMLSession() or HTMLSession()
self.page = None
self.next_symbol = DEFAULT_NEXT_SYMBOL
def __repr__(self) -> str:
return f"<HTML url={self.url!r}>"
def __iter__(self):
next = self
while True:
yield next
try:
next = next.next(fetch=True, next_symbol=self.next_symbol).html
except AttributeError:
break
def __next__(self):
return self.next(fetch=True, next_symbol=self.next_symbol).html
def __aiter__(self):
return self
async def __anext__(self):
while True:
url = self.next(fetch=False, next_symbol=self.next_symbol)
if not url:
break
response = await self.session.get(url)
return response.html
def add_next_symbol(self, next_symbol):
self.next_symbol.append(next_symbol)
async def _async_render(self, *, url: str, script: str = None, scrolldown, sleep: int, wait: float, reload, content: Optional[str], timeout: Union[float, int], keep_page: bool):
""" Handle page creation and js rendering. Internal use for render/arender methods. """
try:
page = await self.browser.newPage()
# Wait before rendering the page, to prevent timeouts.
await asyncio.sleep(wait)
# Load the given page (GET request, obviously.)
if reload:
await page.goto(url, options={'timeout': int(timeout * 1000)})
else:
await page.goto(f'data:text/html,{self.html}', options={'timeout': int(timeout * 1000)})
result = None
if script:
result = await page.evaluate(script)
if scrolldown:
for _ in range(scrolldown):
await page._keyboard.down('PageDown')
await asyncio.sleep(sleep)
else:
await asyncio.sleep(sleep)
if scrolldown:
await page._keyboard.up('PageDown')
# Return the content of the page, JavaScript evaluated.
content = await page.content()
if not keep_page:
await page.close()
page = None
return content, result, page
except TimeoutError:
await page.close()
page = None
return None
def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
"""Reloads the response in Chromium, and replaces HTML content
with an updated version, with JavaScript executed.
:param retries: The number of times to retry loading the page in Chromium.
:param script: JavaScript to execute upon page load (optional).
:param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
:param scrolldown: Integer, if provided, of how many times to page down.
:param sleep: Integer, if provided, of how many long to sleep after initial render.
:param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
:param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
If ``scrolldown`` is specified, the page will scrolldown the specified
number of times, after sleeping the specified amount of time
(e.g. ``scrolldown=10, sleep=1``).
If just ``sleep`` is provided, the rendering will wait *n* seconds, before
returning.
If ``script`` is specified, it will execute the provided JavaScript at
runtime. Example:
.. code-block:: python
script = \"\"\"
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}
\"\"\"
Returns the return value of the executed ``script``, if any is provided:
.. code-block:: python
>>> r.html.render(script=script)
{'width': 800, 'height': 600, 'deviceScaleFactor': 1}
Warning: the first time you run this method, it will download
Chromium into your home directory (``~/.pyppeteer``).
"""
self.browser = self.session.browser # Automatically create a event loop and browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for i in range(retries):
if not content:
try:
content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page))
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
async def arender(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
""" Async version of render. Takes same parameters. """
self.browser = await self.session.browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for _ in range(retries):
if not content:
try:
content, result, page = await self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page)
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
|
kennethreitz/requests-html | requests_html.py | HTML._async_render | python | async def _async_render(self, *, url: str, script: str = None, scrolldown, sleep: int, wait: float, reload, content: Optional[str], timeout: Union[float, int], keep_page: bool):
try:
page = await self.browser.newPage()
# Wait before rendering the page, to prevent timeouts.
await asyncio.sleep(wait)
# Load the given page (GET request, obviously.)
if reload:
await page.goto(url, options={'timeout': int(timeout * 1000)})
else:
await page.goto(f'data:text/html,{self.html}', options={'timeout': int(timeout * 1000)})
result = None
if script:
result = await page.evaluate(script)
if scrolldown:
for _ in range(scrolldown):
await page._keyboard.down('PageDown')
await asyncio.sleep(sleep)
else:
await asyncio.sleep(sleep)
if scrolldown:
await page._keyboard.up('PageDown')
# Return the content of the page, JavaScript evaluated.
content = await page.content()
if not keep_page:
await page.close()
page = None
return content, result, page
except TimeoutError:
await page.close()
page = None
return None | Handle page creation and js rendering. Internal use for render/arender methods. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L502-L539 | null | class HTML(BaseParser):
"""An HTML document, ready for parsing.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param html: HTML from which to base the parsing upon (optional).
:param default_encoding: Which encoding to default to.
"""
def __init__(self, *, session: Union['HTMLSession', 'AsyncHTMLSession'] = None, url: str = DEFAULT_URL, html: _HTML, default_encoding: str = DEFAULT_ENCODING, async_: bool = False) -> None:
# Convert incoming unicode HTML into bytes.
if isinstance(html, str):
html = html.encode(DEFAULT_ENCODING)
pq = PyQuery(html)
super(HTML, self).__init__(
element=pq('html') or pq.wrapAll('<html></html>')('html'),
html=html,
url=url,
default_encoding=default_encoding
)
self.session = session or async_ and AsyncHTMLSession() or HTMLSession()
self.page = None
self.next_symbol = DEFAULT_NEXT_SYMBOL
def __repr__(self) -> str:
return f"<HTML url={self.url!r}>"
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
"""Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
"""
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url
def __iter__(self):
next = self
while True:
yield next
try:
next = next.next(fetch=True, next_symbol=self.next_symbol).html
except AttributeError:
break
def __next__(self):
return self.next(fetch=True, next_symbol=self.next_symbol).html
def __aiter__(self):
return self
async def __anext__(self):
while True:
url = self.next(fetch=False, next_symbol=self.next_symbol)
if not url:
break
response = await self.session.get(url)
return response.html
def add_next_symbol(self, next_symbol):
self.next_symbol.append(next_symbol)
def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
"""Reloads the response in Chromium, and replaces HTML content
with an updated version, with JavaScript executed.
:param retries: The number of times to retry loading the page in Chromium.
:param script: JavaScript to execute upon page load (optional).
:param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
:param scrolldown: Integer, if provided, of how many times to page down.
:param sleep: Integer, if provided, of how many long to sleep after initial render.
:param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
:param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
If ``scrolldown`` is specified, the page will scrolldown the specified
number of times, after sleeping the specified amount of time
(e.g. ``scrolldown=10, sleep=1``).
If just ``sleep`` is provided, the rendering will wait *n* seconds, before
returning.
If ``script`` is specified, it will execute the provided JavaScript at
runtime. Example:
.. code-block:: python
script = \"\"\"
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}
\"\"\"
Returns the return value of the executed ``script``, if any is provided:
.. code-block:: python
>>> r.html.render(script=script)
{'width': 800, 'height': 600, 'deviceScaleFactor': 1}
Warning: the first time you run this method, it will download
Chromium into your home directory (``~/.pyppeteer``).
"""
self.browser = self.session.browser # Automatically create a event loop and browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for i in range(retries):
if not content:
try:
content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page))
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
async def arender(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
""" Async version of render. Takes same parameters. """
self.browser = await self.session.browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for _ in range(retries):
if not content:
try:
content, result, page = await self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page)
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
|
kennethreitz/requests-html | requests_html.py | HTML.render | python | def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
self.browser = self.session.browser # Automatically create a event loop and browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for i in range(retries):
if not content:
try:
content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page))
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result | Reloads the response in Chromium, and replaces HTML content
with an updated version, with JavaScript executed.
:param retries: The number of times to retry loading the page in Chromium.
:param script: JavaScript to execute upon page load (optional).
:param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
:param scrolldown: Integer, if provided, of how many times to page down.
:param sleep: Integer, if provided, of how many long to sleep after initial render.
:param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
:param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
If ``scrolldown`` is specified, the page will scrolldown the specified
number of times, after sleeping the specified amount of time
(e.g. ``scrolldown=10, sleep=1``).
If just ``sleep`` is provided, the rendering will wait *n* seconds, before
returning.
If ``script`` is specified, it will execute the provided JavaScript at
runtime. Example:
.. code-block:: python
script = \"\"\"
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}
\"\"\"
Returns the return value of the executed ``script``, if any is provided:
.. code-block:: python
>>> r.html.render(script=script)
{'width': 800, 'height': 600, 'deviceScaleFactor': 1}
Warning: the first time you run this method, it will download
Chromium into your home directory (``~/.pyppeteer``). | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L541-L610 | null | class HTML(BaseParser):
"""An HTML document, ready for parsing.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param html: HTML from which to base the parsing upon (optional).
:param default_encoding: Which encoding to default to.
"""
def __init__(self, *, session: Union['HTMLSession', 'AsyncHTMLSession'] = None, url: str = DEFAULT_URL, html: _HTML, default_encoding: str = DEFAULT_ENCODING, async_: bool = False) -> None:
# Convert incoming unicode HTML into bytes.
if isinstance(html, str):
html = html.encode(DEFAULT_ENCODING)
pq = PyQuery(html)
super(HTML, self).__init__(
element=pq('html') or pq.wrapAll('<html></html>')('html'),
html=html,
url=url,
default_encoding=default_encoding
)
self.session = session or async_ and AsyncHTMLSession() or HTMLSession()
self.page = None
self.next_symbol = DEFAULT_NEXT_SYMBOL
def __repr__(self) -> str:
return f"<HTML url={self.url!r}>"
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
"""Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
"""
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url
def __iter__(self):
next = self
while True:
yield next
try:
next = next.next(fetch=True, next_symbol=self.next_symbol).html
except AttributeError:
break
def __next__(self):
return self.next(fetch=True, next_symbol=self.next_symbol).html
def __aiter__(self):
return self
async def __anext__(self):
while True:
url = self.next(fetch=False, next_symbol=self.next_symbol)
if not url:
break
response = await self.session.get(url)
return response.html
def add_next_symbol(self, next_symbol):
self.next_symbol.append(next_symbol)
async def _async_render(self, *, url: str, script: str = None, scrolldown, sleep: int, wait: float, reload, content: Optional[str], timeout: Union[float, int], keep_page: bool):
""" Handle page creation and js rendering. Internal use for render/arender methods. """
try:
page = await self.browser.newPage()
# Wait before rendering the page, to prevent timeouts.
await asyncio.sleep(wait)
# Load the given page (GET request, obviously.)
if reload:
await page.goto(url, options={'timeout': int(timeout * 1000)})
else:
await page.goto(f'data:text/html,{self.html}', options={'timeout': int(timeout * 1000)})
result = None
if script:
result = await page.evaluate(script)
if scrolldown:
for _ in range(scrolldown):
await page._keyboard.down('PageDown')
await asyncio.sleep(sleep)
else:
await asyncio.sleep(sleep)
if scrolldown:
await page._keyboard.up('PageDown')
# Return the content of the page, JavaScript evaluated.
content = await page.content()
if not keep_page:
await page.close()
page = None
return content, result, page
except TimeoutError:
await page.close()
page = None
return None
async def arender(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
""" Async version of render. Takes same parameters. """
self.browser = await self.session.browser
content = None
# Automatically set Reload to False, if example URL is being used.
if self.url == DEFAULT_URL:
reload = False
for _ in range(retries):
if not content:
try:
content, result, page = await self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page)
except TypeError:
pass
else:
break
if not content:
raise MaxRetries("Unable to render the page. Try increasing timeout")
html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
self.__dict__.update(html.__dict__)
self.page = page
return result
|
kennethreitz/requests-html | requests_html.py | BaseSession.response_hook | python | def response_hook(self, response, **kwargs) -> HTMLResponse:
if not response.encoding:
response.encoding = DEFAULT_ENCODING
return HTMLResponse._from_response(response, self) | Change response enconding and replace it by a HTMLResponse. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L705-L709 | null | class BaseSession(requests.Session):
""" A consumable session, for cookie persistence and connection pooling,
amongst other things.
"""
def __init__(self, mock_browser : bool = True, verify : bool = True,
browser_args : list = ['--no-sandbox']):
super().__init__()
# Mock a web browser's user agent.
if mock_browser:
self.headers['User-Agent'] = user_agent()
self.hooks['response'].append(self.response_hook)
self.verify = verify
self.__browser_args = browser_args
@property
async def browser(self):
if not hasattr(self, "_browser"):
self._browser = await pyppeteer.launch(ignoreHTTPSErrors=not(self.verify), headless=True, args=self.__browser_args)
return self._browser
|
kennethreitz/requests-html | requests_html.py | HTMLSession.close | python | def close(self):
if hasattr(self, "_browser"):
self.loop.run_until_complete(self._browser.close())
super().close() | If a browser was created close it first. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L733-L737 | null | class HTMLSession(BaseSession):
def __init__(self, **kwargs):
super(HTMLSession, self).__init__(**kwargs)
@property
def browser(self):
if not hasattr(self, "_browser"):
self.loop = asyncio.get_event_loop()
if self.loop.is_running():
raise RuntimeError("Cannot use HTMLSession within an existing event loop. Use AsyncHTMLSession instead.")
self._browser = self.loop.run_until_complete(super().browser)
return self._browser
|
kennethreitz/requests-html | requests_html.py | AsyncHTMLSession.request | python | def request(self, *args, **kwargs):
func = partial(super().request, *args, **kwargs)
return self.loop.run_in_executor(self.thread_pool, func) | Partial original request func and run it in a thread. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L756-L759 | null | class AsyncHTMLSession(BaseSession):
""" An async consumable session. """
def __init__(self, loop=None, workers=None,
mock_browser: bool = True, *args, **kwargs):
""" Set or create an event loop and a thread pool.
:param loop: Asyncio loop to use.
:param workers: Amount of threads to use for executing async calls.
If not pass it will default to the number of processors on the
machine, multiplied by 5. """
super().__init__(*args, **kwargs)
self.loop = loop or asyncio.get_event_loop()
self.thread_pool = ThreadPoolExecutor(max_workers=workers)
async def close(self):
""" If a browser was created close it first. """
if hasattr(self, "_browser"):
await self._browser.close()
super().close()
def run(self, *coros):
""" Pass in all the coroutines you want to run, it will wrap each one
in a task, run it and wait for the result. Return a list with all
results, this is returned in the same order coros are passed in. """
tasks = [
asyncio.ensure_future(coro()) for coro in coros
]
done, _ = self.loop.run_until_complete(asyncio.wait(tasks))
return [t.result() for t in done]
|
kennethreitz/requests-html | requests_html.py | AsyncHTMLSession.run | python | def run(self, *coros):
tasks = [
asyncio.ensure_future(coro()) for coro in coros
]
done, _ = self.loop.run_until_complete(asyncio.wait(tasks))
return [t.result() for t in done] | Pass in all the coroutines you want to run, it will wrap each one
in a task, run it and wait for the result. Return a list with all
results, this is returned in the same order coros are passed in. | train | https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L767-L775 | null | class AsyncHTMLSession(BaseSession):
""" An async consumable session. """
def __init__(self, loop=None, workers=None,
mock_browser: bool = True, *args, **kwargs):
""" Set or create an event loop and a thread pool.
:param loop: Asyncio loop to use.
:param workers: Amount of threads to use for executing async calls.
If not pass it will default to the number of processors on the
machine, multiplied by 5. """
super().__init__(*args, **kwargs)
self.loop = loop or asyncio.get_event_loop()
self.thread_pool = ThreadPoolExecutor(max_workers=workers)
def request(self, *args, **kwargs):
""" Partial original request func and run it in a thread. """
func = partial(super().request, *args, **kwargs)
return self.loop.run_in_executor(self.thread_pool, func)
async def close(self):
""" If a browser was created close it first. """
if hasattr(self, "_browser"):
await self._browser.close()
super().close()
|
AirtestProject/Airtest | playground/win_ide.py | WindowsInIDE.connect | python | def connect(self, **kwargs):
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
self.set_foreground()
except RuntimeError:
self._top_window = None | Connect to window and set it foreground
Args:
**kwargs: optional arguments
Returns:
None | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/win_ide.py#L19-L35 | null | class WindowsInIDE(Windows):
"""Windows Device in Airtest-IDE"""
def __init__(self, handle=None, dpifactor=1, **kwargs):
if isinstance(handle, str) and handle.isdigit():
handle = int(handle)
super(WindowsInIDE, self).__init__(handle, dpifactor=dpifactor, **kwargs)
self.handle = handle
def get_rect(self):
"""
Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom)
"""
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom)
def snapshot(self, filename="tmp.png"):
"""
Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
display the screenshot
"""
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img
|
AirtestProject/Airtest | playground/win_ide.py | WindowsInIDE.get_rect | python | def get_rect(self):
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom) | Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom) | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/win_ide.py#L37-L51 | null | class WindowsInIDE(Windows):
"""Windows Device in Airtest-IDE"""
def __init__(self, handle=None, dpifactor=1, **kwargs):
if isinstance(handle, str) and handle.isdigit():
handle = int(handle)
super(WindowsInIDE, self).__init__(handle, dpifactor=dpifactor, **kwargs)
self.handle = handle
def connect(self, **kwargs):
"""
Connect to window and set it foreground
Args:
**kwargs: optional arguments
Returns:
None
"""
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
self.set_foreground()
except RuntimeError:
self._top_window = None
def snapshot(self, filename="tmp.png"):
"""
Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
display the screenshot
"""
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img
|
AirtestProject/Airtest | playground/win_ide.py | WindowsInIDE.snapshot | python | def snapshot(self, filename="tmp.png"):
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img | Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
display the screenshot | train | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/win_ide.py#L53-L78 | [
"def imread(filename):\n \"\"\"根据图片路径,将图片读取为cv2的图片处理格式.\"\"\"\n if not os.path.isfile(filename):\n raise FileNotExistError(\"File not exist: %s\" % filename)\n if PY3:\n img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n else:\n filename = filename.encode(sys.getfilesystemencoding())\n img = cv2.imread(filename, 1)\n return img\n",
"def screenshot(filename, hwnd=None):\n \"\"\"\n Take the screenshot of Windows app\n\n Args:\n filename: file name where to store the screenshot\n hwnd:\n\n Returns:\n bitmap screenshot file\n\n \"\"\"\n # import ctypes\n # user32 = ctypes.windll.user32\n # user32.SetProcessDPIAware()\n\n if hwnd is None:\n \"\"\"all screens\"\"\"\n hwnd = win32gui.GetDesktopWindow()\n # get complete virtual screen including all monitors\n w = win32api.GetSystemMetrics(SM_CXVIRTUALSCREEN)\n h = win32api.GetSystemMetrics(SM_CYVIRTUALSCREEN)\n x = win32api.GetSystemMetrics(SM_XVIRTUALSCREEN)\n y = win32api.GetSystemMetrics(SM_YVIRTUALSCREEN)\n else:\n \"\"\"window\"\"\"\n rect = win32gui.GetWindowRect(hwnd)\n w = abs(rect[2] - rect[0])\n h = abs(rect[3] - rect[1])\n x, y = 0, 0\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)\n saveDC.SelectObject(saveBitMap)\n saveDC.BitBlt((0, 0), (w, h), mfcDC, (x, y), win32con.SRCCOPY)\n # saveBitMap.SaveBitmapFile(saveDC, filename)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n pil_image = Image.frombuffer(\n 'RGB',\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n cv2_image = pil_2_cv2(pil_image)\n\n mfcDC.DeleteDC()\n saveDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n return cv2_image\n"
] | class WindowsInIDE(Windows):
"""Windows Device in Airtest-IDE"""
def __init__(self, handle=None, dpifactor=1, **kwargs):
if isinstance(handle, str) and handle.isdigit():
handle = int(handle)
super(WindowsInIDE, self).__init__(handle, dpifactor=dpifactor, **kwargs)
self.handle = handle
def connect(self, **kwargs):
"""
Connect to window and set it foreground
Args:
**kwargs: optional arguments
Returns:
None
"""
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
self.set_foreground()
except RuntimeError:
self._top_window = None
def get_rect(self):
"""
Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom)
"""
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.