id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
171,310 | from contextlib import contextmanager
import errno
from io import BytesIO
import json
import mimetypes
from pathlib import Path
import random
import sys
import signal
import socket
import threading
try:
import tornado
except ImportError as err:
raise RuntimeError("The WebAgg backend requires Tornado.") from err
import tornado.web
import tornado.ioloop
import tornado.websocket
import matplotlib as mpl
from matplotlib.backend_bases import _Backend
from matplotlib._pylab_helpers import Gcf
from . import backend_webagg_core as core
from .backend_webagg_core import ( # noqa: F401 # pylint: disable=W0611
TimerAsyncio, TimerTornado)
# Background thread running the Tornado I/O loop, so figures can be
# served without blocking; started on demand by ipython_inline_display
# when no asyncio event loop is already running.
webagg_server_thread = threading.Thread(
    target=lambda: tornado.ioloop.IOLoop.instance().start())
class FigureManagerWebAgg(core.FigureManagerWebAgg):
    """Figure manager for the WebAgg (Tornado HTTP/WebSocket) backend."""

    _toolbar2_class = core.NavigationToolbar2WebAgg

    # Fix: this method receives *cls* and is called on the class by the
    # _Backend machinery, but the @classmethod decorator was missing.
    @classmethod
    def pyplot_show(cls, *, block=None):
        """Start the WebAgg server, print/open its URL, and block in it."""
        WebAggApplication.initialize()

        url = "http://{address}:{port}{prefix}".format(
            address=WebAggApplication.address,
            port=WebAggApplication.port,
            prefix=WebAggApplication.url_prefix)

        if mpl.rcParams['webagg.open_in_browser']:
            import webbrowser
            # Fall back to printing the URL if no browser could be opened.
            if not webbrowser.open(url):
                print("To view figure, visit {0}".format(url))
        else:
            print("To view figure, visit {0}".format(url))

        WebAggApplication.start()
class WebAggApplication(tornado.web.Application):
    """Tornado application serving Matplotlib figures over HTTP/WebSockets."""

    initialized = False
    started = False

    class FavIcon(tornado.web.RequestHandler):
        def get(self):
            self.set_header('Content-Type', 'image/png')
            self.write(Path(mpl.get_data_path(),
                            'images/matplotlib.png').read_bytes())

    class SingleFigurePage(tornado.web.RequestHandler):
        def __init__(self, application, request, *, url_prefix='', **kwargs):
            self.url_prefix = url_prefix
            super().__init__(application, request, **kwargs)

        def get(self, fignum):
            fignum = int(fignum)
            manager = Gcf.get_fig_manager(fignum)
            ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
                                                       prefix=self.url_prefix)
            self.render(
                "single_figure.html",
                prefix=self.url_prefix,
                ws_uri=ws_uri,
                fig_id=fignum,
                toolitems=core.NavigationToolbar2WebAgg.toolitems,
                canvas=manager.canvas)

    class AllFiguresPage(tornado.web.RequestHandler):
        def __init__(self, application, request, *, url_prefix='', **kwargs):
            self.url_prefix = url_prefix
            super().__init__(application, request, **kwargs)

        def get(self):
            ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
                                                       prefix=self.url_prefix)
            self.render(
                "all_figures.html",
                prefix=self.url_prefix,
                ws_uri=ws_uri,
                figures=sorted(Gcf.figs.items()),
                toolitems=core.NavigationToolbar2WebAgg.toolitems)

    class MplJs(tornado.web.RequestHandler):
        def get(self):
            self.set_header('Content-Type', 'application/javascript')
            js_content = core.FigureManagerWebAgg.get_javascript()
            self.write(js_content)

    class Download(tornado.web.RequestHandler):
        def get(self, fignum, fmt):
            fignum = int(fignum)
            manager = Gcf.get_fig_manager(fignum)
            self.set_header(
                'Content-Type', mimetypes.types_map.get(fmt, 'binary'))
            buff = BytesIO()
            manager.canvas.figure.savefig(buff, format=fmt)
            self.write(buff.getvalue())

    class WebSocket(tornado.websocket.WebSocketHandler):
        supports_binary = True

        def open(self, fignum):
            self.fignum = int(fignum)
            self.manager = Gcf.get_fig_manager(self.fignum)
            self.manager.add_web_socket(self)
            if hasattr(self, 'set_nodelay'):
                self.set_nodelay(True)

        def on_close(self):
            self.manager.remove_web_socket(self)

        def on_message(self, message):
            message = json.loads(message)
            # The 'supports_binary' message is on a client-by-client
            # basis.  The others affect the (shared) canvas as a
            # whole.
            if message['type'] == 'supports_binary':
                self.supports_binary = message['value']
            else:
                manager = Gcf.get_fig_manager(self.fignum)
                # It is possible for a figure to be closed,
                # but a stale figure UI is still sending messages
                # from the browser.
                if manager is not None:
                    manager.handle_json(message)

        def send_json(self, content):
            self.write_message(json.dumps(content))

        def send_binary(self, blob):
            if self.supports_binary:
                self.write_message(blob, binary=True)
            else:
                # Fix: blob.encode('base64') is Python-2-only (bytes have
                # no .encode on Python 3); use base64.b64encode instead.
                import base64
                data_uri = "data:image/png;base64,{0}".format(
                    base64.b64encode(blob).decode('utf-8'))
                self.write_message(data_uri)

    def __init__(self, url_prefix=''):
        if url_prefix:
            assert url_prefix[0] == '/' and url_prefix[-1] != '/', \
                'url_prefix must start with a "/" and not end with one.'
        super().__init__(
            [
                # Static files for the CSS and JS
                (url_prefix + r'/_static/(.*)',
                 tornado.web.StaticFileHandler,
                 {'path': core.FigureManagerWebAgg.get_static_file_path()}),
                # Static images for the toolbar
                (url_prefix + r'/_images/(.*)',
                 tornado.web.StaticFileHandler,
                 {'path': Path(mpl.get_data_path(), 'images')}),
                # A Matplotlib favicon
                (url_prefix + r'/favicon.ico', self.FavIcon),
                # The page that contains all of the pieces
                (url_prefix + r'/([0-9]+)', self.SingleFigurePage,
                 {'url_prefix': url_prefix}),
                # The page that contains all of the figures
                (url_prefix + r'/?', self.AllFiguresPage,
                 {'url_prefix': url_prefix}),
                (url_prefix + r'/js/mpl.js', self.MplJs),
                # Sends images and events to the browser, and receives
                # events from the browser
                (url_prefix + r'/([0-9]+)/ws', self.WebSocket),
                # Handles the downloading (i.e., saving) of static images
                (url_prefix + r'/([0-9]+)/download.([a-z0-9.]+)',
                 self.Download),
            ],
            template_path=core.FigureManagerWebAgg.get_static_file_path())

    # Fix: receives *cls* and is invoked on the class; the @classmethod
    # decorator was missing.
    @classmethod
    def initialize(cls, url_prefix='', port=None, address=None):
        """Create the application and bind it to an available port (once)."""
        if cls.initialized:
            return

        # Create the class instance
        app = cls(url_prefix=url_prefix)

        cls.url_prefix = url_prefix

        # This port selection algorithm is borrowed, more or less
        # verbatim, from IPython.
        def random_ports(port, n):
            """
            Generate a list of n random ports near the given port.

            The first 5 ports will be sequential, and the remaining n-5 will be
            randomly selected in the range [port-2*n, port+2*n].
            """
            for i in range(min(5, n)):
                yield port + i
            for i in range(n - 5):
                yield port + random.randint(-2 * n, 2 * n)

        if address is None:
            cls.address = mpl.rcParams['webagg.address']
        else:
            cls.address = address
        # Fix: honor an explicitly passed *port*; it was previously ignored
        # in favor of the rcParam.
        cls.port = port if port is not None else mpl.rcParams['webagg.port']
        for port in random_ports(cls.port,
                                 mpl.rcParams['webagg.port_retries']):
            try:
                app.listen(port, cls.address)
            except socket.error as e:
                if e.errno != errno.EADDRINUSE:
                    raise
            else:
                cls.port = port
                break
        else:
            raise SystemExit(
                "The webagg server could not be started because an available "
                "port could not be found")

        cls.initialized = True

    # Fix: receives *cls*; the @classmethod decorator was missing.
    @classmethod
    def start(cls):
        import asyncio
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            pass
        else:
            cls.started = True

        if cls.started:
            return

        # IOLoop.running() was removed as of Tornado 2.4; see for example
        # https://groups.google.com/forum/#!topic/python-tornado/QLMzkpQBGOY
        # Thus there is no correct way to check if the loop has already been
        # launched.  We may end up with two concurrently running loops in
        # that unlucky case with all the expected consequences.
        ioloop = tornado.ioloop.IOLoop.instance()

        def shutdown():
            ioloop.stop()
            print("Server is stopped")
            sys.stdout.flush()
            cls.started = False

        # Fix: used below via `with catch_sigint():`, so this generator
        # must be wrapped by @contextmanager (imported at module top).
        @contextmanager
        def catch_sigint():
            old_handler = signal.signal(
                signal.SIGINT,
                lambda sig, frame: ioloop.add_callback_from_signal(shutdown))
            try:
                yield
            finally:
                signal.signal(signal.SIGINT, old_handler)

        # Set the flag to True *before* blocking on ioloop.start()
        cls.started = True

        print("Press Ctrl+C to stop WebAgg server")
        sys.stdout.flush()
        with catch_sigint():
            ioloop.start()
class Path(PurePath):
    # NOTE(review): this is typeshed-style stub (.pyi) content for
    # pathlib.Path that appears to have been spliced into this file by the
    # dataset extraction.  Names such as PurePath, _P, Type, Union,
    # Optional, Generator, Literal, the Open*Mode aliases and the IO
    # return types come from the stub's own imports, which are not visible
    # in this excerpt — verify before use.  The multiple `open` defs were
    # presumably `@overload`s whose decorators were stripped; confirm
    # against the original stub.
    def __new__(cls: Type[_P], *args: Union[str, _PathLike], **kwargs: Any) -> _P: ...
    def __enter__(self: _P) -> _P: ...
    def __exit__(
        self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]
    ) -> Optional[bool]: ...
    def cwd(cls: Type[_P]) -> _P: ...
    def stat(self) -> os.stat_result: ...
    def chmod(self, mode: int) -> None: ...
    def exists(self) -> bool: ...
    def glob(self: _P, pattern: str) -> Generator[_P, None, None]: ...
    def group(self) -> str: ...
    def is_dir(self) -> bool: ...
    def is_file(self) -> bool: ...
    if sys.version_info >= (3, 7):
        def is_mount(self) -> bool: ...
    def is_symlink(self) -> bool: ...
    def is_socket(self) -> bool: ...
    def is_fifo(self) -> bool: ...
    def is_block_device(self) -> bool: ...
    def is_char_device(self) -> bool: ...
    def iterdir(self: _P) -> Generator[_P, None, None]: ...
    def lchmod(self, mode: int) -> None: ...
    def lstat(self) -> os.stat_result: ...
    def mkdir(self, mode: int = ..., parents: bool = ..., exist_ok: bool = ...) -> None: ...
    # Adapted from builtins.open
    # Text mode: always returns a TextIOWrapper
    def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: Optional[str] = ...,
        errors: Optional[str] = ...,
        newline: Optional[str] = ...,
    ) -> TextIOWrapper: ...
    # Unbuffered binary mode: returns a FileIO
    def open(
        self, mode: OpenBinaryMode, buffering: Literal[0], encoding: None = ..., errors: None = ..., newline: None = ...
    ) -> FileIO: ...
    # Buffering is on: return BufferedRandom, BufferedReader, or BufferedWriter
    def open(
        self,
        mode: OpenBinaryModeUpdating,
        buffering: Literal[-1, 1] = ...,
        encoding: None = ...,
        errors: None = ...,
        newline: None = ...,
    ) -> BufferedRandom: ...
    def open(
        self,
        mode: OpenBinaryModeWriting,
        buffering: Literal[-1, 1] = ...,
        encoding: None = ...,
        errors: None = ...,
        newline: None = ...,
    ) -> BufferedWriter: ...
    def open(
        self,
        mode: OpenBinaryModeReading,
        buffering: Literal[-1, 1] = ...,
        encoding: None = ...,
        errors: None = ...,
        newline: None = ...,
    ) -> BufferedReader: ...
    # Buffering cannot be determined: fall back to BinaryIO
    def open(
        self, mode: OpenBinaryMode, buffering: int, encoding: None = ..., errors: None = ..., newline: None = ...
    ) -> BinaryIO: ...
    # Fallback if mode is not specified
    def open(
        self,
        mode: str,
        buffering: int = ...,
        encoding: Optional[str] = ...,
        errors: Optional[str] = ...,
        newline: Optional[str] = ...,
    ) -> IO[Any]: ...
    def owner(self) -> str: ...
    if sys.version_info >= (3, 9):
        def readlink(self: _P) -> _P: ...
    if sys.version_info >= (3, 8):
        def rename(self: _P, target: Union[str, PurePath]) -> _P: ...
        def replace(self: _P, target: Union[str, PurePath]) -> _P: ...
    else:
        def rename(self, target: Union[str, PurePath]) -> None: ...
        def replace(self, target: Union[str, PurePath]) -> None: ...
    def resolve(self: _P, strict: bool = ...) -> _P: ...
    def rglob(self: _P, pattern: str) -> Generator[_P, None, None]: ...
    def rmdir(self) -> None: ...
    def symlink_to(self, target: Union[str, Path], target_is_directory: bool = ...) -> None: ...
    def touch(self, mode: int = ..., exist_ok: bool = ...) -> None: ...
    if sys.version_info >= (3, 8):
        def unlink(self, missing_ok: bool = ...) -> None: ...
    else:
        def unlink(self) -> None: ...
    def home(cls: Type[_P]) -> _P: ...
    def absolute(self: _P) -> _P: ...
    def expanduser(self: _P) -> _P: ...
    def read_bytes(self) -> bytes: ...
    def read_text(self, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> str: ...
    def samefile(self, other_path: Union[str, bytes, int, Path]) -> bool: ...
    def write_bytes(self, data: bytes) -> int: ...
    def write_text(self, data: str, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> int: ...
    if sys.version_info >= (3, 8):
        def link_to(self, target: Union[str, bytes, os.PathLike[str]]) -> None: ...
def ipython_inline_display(figure):
    """
    Render *figure* as an HTML snippet for inline display in IPython.

    Initializes the WebAgg application and, if no asyncio event loop is
    running, starts the background server thread; then fills in the
    ``ipython_inline_figure.html`` template for this figure.
    """
    import tornado.template

    WebAggApplication.initialize()
    import asyncio
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No running asyncio loop: serve figures from the module-level
        # background thread instead (start it only once).
        if not webagg_server_thread.is_alive():
            webagg_server_thread.start()

    fignum = figure.number
    tpl = Path(core.FigureManagerWebAgg.get_static_file_path(),
               "ipython_inline_figure.html").read_text()
    t = tornado.template.Template(tpl)
    # Template output is bytes; decode for IPython's HTML display.
    return t.generate(
        prefix=WebAggApplication.url_prefix,
        fig_id=fignum,
        toolitems=core.NavigationToolbar2WebAgg.toolitems,
        canvas=figure.canvas,
        port=WebAggApplication.port).decode('utf-8')
171,311 | import uuid
import weakref
from contextlib import contextmanager
import logging
import math
import os.path
import sys
import tkinter as tk
import tkinter.filedialog
import tkinter.font
import tkinter.messagebox
from tkinter.simpledialog import SimpleDialog
import numpy as np
from PIL import Image, ImageTk
import matplotlib as mpl
from matplotlib import _api, backend_tools, cbook, _c_internal_utils
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, ToolContainerBase, cursors, _Mode,
CloseEvent, KeyEvent, LocationEvent, MouseEvent, ResizeEvent)
from matplotlib._pylab_helpers import Gcf
from . import _tkagg
# Fix: this generator is a context manager (its name and yield/finally
# shape show acquire/release semantics), but the @contextmanager
# decorator — imported at the top of this module — was missing, so
# `with _restore_foreground_window_at_end():` would fail.
@contextmanager
def _restore_foreground_window_at_end():
    """
    Context manager that records the foreground window on entry and, if
    :rc:`tk.window_focus` is set, restores it on exit (Windows only).
    """
    foreground = _c_internal_utils.Win32_GetForegroundWindow()
    try:
        yield
    finally:
        if mpl.rcParams['tk.window_focus']:
            _c_internal_utils.Win32_SetForegroundWindow(foreground)
171,312 | import copy
import datetime
import logging
from numbers import Integral, Real
from matplotlib import _api, colors as mcolors
from matplotlib.backends.qt_compat import (
QtGui, QtWidgets, QtCore, _enum, _to_int)
The provided code snippet includes necessary dependencies for implementing the `to_qcolor` function. Write a Python function `def to_qcolor(color)` to solve the following problem:
Create a QColor from a matplotlib color
Here is the function:
def to_qcolor(color):
    """Create a QColor from a matplotlib color"""
    result = QtGui.QColor()
    try:
        rgba_components = mcolors.to_rgba(color)
    except ValueError:
        # Unparseable color: warn and hand back the default-constructed
        # (invalid) QColor.
        _api.warn_external(f'Ignoring invalid color {color!r}')
        return result
    result.setRgbF(*rgba_components)
    return result
171,313 | import copy
import datetime
import logging
from numbers import Integral, Real
from matplotlib import _api, colors as mcolors
from matplotlib.backends.qt_compat import (
QtGui, QtWidgets, QtCore, _enum, _to_int)
def font_is_installed(font):
    """Check if font is installed"""
    all_families = QtGui.QFontDatabase().families()
    return [family for family in all_families if str(family) == font]
class Integral(Rational):
    # NOTE(review): typeshed-style stub content for numbers.Integral,
    # apparently spliced in by the dataset extraction; `Rational`, `Any`,
    # `Optional` and (on the py2 branch) `long` come from the stub's own
    # imports, which are not visible here.  The methods were presumably
    # decorated (@abstractmethod etc.) in the original stub.
    if sys.version_info >= (3, 0):
        def __int__(self) -> int: ...
    else:
        def __long__(self) -> long: ...
    def __index__(self) -> int: ...
    def __pow__(self, exponent: Any, modulus: Optional[Any] = ...) -> Any: ...
    def __lshift__(self, other: Any) -> Any: ...
    def __rlshift__(self, other: Any) -> Any: ...
    def __rshift__(self, other: Any) -> Any: ...
    def __rrshift__(self, other: Any) -> Any: ...
    def __and__(self, other: Any) -> Any: ...
    def __rand__(self, other: Any) -> Any: ...
    def __xor__(self, other: Any) -> Any: ...
    def __rxor__(self, other: Any) -> Any: ...
    def __or__(self, other: Any) -> Any: ...
    def __ror__(self, other: Any) -> Any: ...
    def __invert__(self) -> Any: ...
    def __float__(self) -> float: ...
    def numerator(self) -> int: ...
    def denominator(self) -> int: ...
The provided code snippet includes necessary dependencies for implementing the `tuple_to_qfont` function. Write a Python function `def tuple_to_qfont(tup)` to solve the following problem:
Create a QFont from tuple: (family [string], size [int], italic [bool], bold [bool])
Here is the function:
def tuple_to_qfont(tup):
    """
    Create a QFont from tuple:
    (family [string], size [int], italic [bool], bold [bool])
    """
    # Reject anything that is not a well-formed 4-tuple of
    # (installed family, integral size, bool, bool).
    if not isinstance(tup, tuple) or len(tup) != 4:
        return None
    family, size, italic, bold = tup
    if not font_is_installed(family):
        return None
    if not isinstance(size, Integral):
        return None
    if not (isinstance(italic, bool) and isinstance(bold, bool)):
        return None
    font = QtGui.QFont()
    font.setFamily(family)
    font.setPointSize(size)
    font.setItalic(italic)
    font.setBold(bold)
    return font
171,314 | import copy
import datetime
import logging
from numbers import Integral, Real
from matplotlib import _api, colors as mcolors
from matplotlib.backends.qt_compat import (
QtGui, QtWidgets, QtCore, _enum, _to_int)
def qfont_to_tuple(font):
    """Return *font* as a (family, point_size, italic, bold) tuple."""
    family = str(font.family())
    point_size = int(font.pointSize())
    return (family, point_size, font.italic(), font.bold())
171,315 | import copy
import datetime
import logging
from numbers import Integral, Real
from matplotlib import _api, colors as mcolors
from matplotlib.backends.qt_compat import (
QtGui, QtWidgets, QtCore, _enum, _to_int)
def _enum(name):
    # foo.bar.Enum.Entry (PyQt6) <=> foo.bar.Entry (non-PyQt6).
    # NOTE(review): relies on module-level `operator`, `sys` and `QT_API`,
    # which are not visible in this excerpt — verify they are in scope.
    return operator.attrgetter(
        name if QT_API == 'PyQt6' else name.rpartition(".")[0]
    )(sys.modules[QtCore.__package__])
def is_edit_valid(edit):
    """Return whether the text in *edit* is accepted by its validator."""
    contents = edit.text()
    # validate() returns (state, text, pos); only the state matters here.
    validation = edit.validator().validate(contents, 0)
    return validation[0] == _enum("QtGui.QDoubleValidator.State").Acceptable
171,316 | import copy
import datetime
import logging
from numbers import Integral, Real
from matplotlib import _api, colors as mcolors
from matplotlib.backends.qt_compat import (
QtGui, QtWidgets, QtCore, _enum, _to_int)
def create_datagroup_example():
    """Return the demo datagroup: one datalist under three category tabs."""
    datalist = create_datalist_example()
    group = (
        (datalist, "Category 1", "Category 1 comment"),
        (datalist, "Category 2", "Category 2 comment"),
        (datalist, "Category 3", "Category 3 comment"),
    )
    return group
171,317 | import copy
import datetime
import logging
from numbers import Integral, Real
from matplotlib import _api, colors as mcolors
from matplotlib.backends.qt_compat import (
QtGui, QtWidgets, QtCore, _enum, _to_int)
def apply_test(data):
    """Demo apply-callback: echo the received form data to stdout."""
    print("data:", data)
171,318 | from itertools import chain
from matplotlib import cbook, cm, colors as mcolors, markers, image as mimage
from matplotlib.backends.qt_compat import QtGui
from matplotlib.backends.qt_editor import _formlayout
from matplotlib.dates import DateConverter, num2date
# Shorthand -> human-readable-name maps used to populate the comboboxes
# of the "Figure options" dialog below.
LINESTYLES = {'-': 'Solid',
              '--': 'Dashed',
              '-.': 'DashDot',
              ':': 'Dotted',
              'None': 'None',
              }

DRAWSTYLES = {
    'default': 'Default',
    'steps-pre': 'Steps (Pre)', 'steps': 'Steps (Pre)',
    'steps-mid': 'Steps (Mid)',
    'steps-post': 'Steps (Post)'}

# All marker shorthands known to MarkerStyle.
MARKERS = markers.MarkerStyle.markers
class chain(Iterator[_T], Generic[_T]):
    # NOTE(review): typeshed-style stub content for itertools.chain,
    # apparently spliced in by the dataset extraction; `Iterator`,
    # `Iterable`, `Generic`, `_T` and `_S` come from the stub's own
    # imports, which are not visible here.  `from_iterable` was presumably
    # a @staticmethod in the original stub.
    def __init__(self, *iterables: Iterable[_T]) -> None: ...
    def __next__(self) -> _T: ...
    def __iter__(self) -> Iterator[_T]: ...
    def from_iterable(iterable: Iterable[Iterable[_S]]) -> Iterator[_S]: ...
def num2date(x, tz=None):
    """
    Convert Matplotlib dates to `~datetime.datetime` objects.

    Parameters
    ----------
    x : float or sequence of floats
        Number of days (fraction part represents hours, minutes, seconds)
        since the epoch.  See `.get_epoch` for the
        epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`.
    tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
        Timezone of *x*. If a string, *tz* is passed to `dateutil.tz`.

    Returns
    -------
    `~datetime.datetime` or sequence of `~datetime.datetime`
        Dates are returned in timezone *tz*.

        If *x* is a sequence, a sequence of `~datetime.datetime` objects will
        be returned.

    Notes
    -----
    The Gregorian calendar is assumed; this is not universal practice.
    For details, see the module docstring.
    """
    # NOTE(review): `_get_tzinfo` and `_from_ordinalf_np_vectorized` are
    # module-level helpers not visible in this excerpt.
    tz = _get_tzinfo(tz)
    return _from_ordinalf_np_vectorized(x, tz).tolist()
class DateConverter(units.ConversionInterface):
    """
    Converter for `datetime.date` and `datetime.datetime` data, or for
    date/time data represented as it would be converted by `date2num`.

    The 'unit' tag for such data is None or a `~datetime.tzinfo` instance.
    """

    def __init__(self, *, interval_multiples=True):
        self._interval_multiples = interval_multiples
        super().__init__()

    def axisinfo(self, unit, axis):
        """
        Return the `~matplotlib.units.AxisInfo` for *unit*.

        *unit* is a `~datetime.tzinfo` instance or None.
        The *axis* argument is required but not used.
        """
        tz = unit
        majloc = AutoDateLocator(tz=tz,
                                 interval_multiples=self._interval_multiples)
        majfmt = AutoDateFormatter(majloc, tz=tz)
        datemin = datetime.date(1970, 1, 1)
        datemax = datetime.date(1970, 1, 2)
        return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
                              default_limits=(datemin, datemax))

    # Fix: takes no *self* parameter, so it must be a staticmethod —
    # the decorator was missing.
    @staticmethod
    def convert(value, unit, axis):
        """
        If *value* is not already a number or sequence of numbers, convert it
        with `date2num`.

        The *unit* and *axis* arguments are not used.
        """
        return date2num(value)

    # Fix: takes no *self* parameter, so it must be a staticmethod —
    # the decorator was missing.
    @staticmethod
    def default_units(x, axis):
        """
        Return the `~datetime.tzinfo` instance of *x* or of its first element,
        or None
        """
        if isinstance(x, np.ndarray):
            x = x.ravel()
        try:
            x = cbook._safe_first_finite(x)
        except (TypeError, StopIteration):
            pass
        try:
            return x.tzinfo
        except AttributeError:
            pass
        return None
The provided code snippet includes necessary dependencies for implementing the `figure_edit` function. Write a Python function `def figure_edit(axes, parent=None)` to solve the following problem:
Edit matplotlib figure options
Here is the function:
def figure_edit(axes, parent=None):
    """
    Edit matplotlib figure options.

    Opens a formlayout dialog for *axes* — title, per-axis min/max, label
    and scale, plus styling for each labeled line and ScalarMappable —
    and applies the user's changes via ``apply_callback``.
    """
    sep = (None, None)  # separator

    # Get / General
    def convert_limits(lim, converter):
        """Convert axis limits for correct input editors."""
        if isinstance(converter, DateConverter):
            return map(num2date, lim)
        # Cast to builtin floats as they have nicer reprs.
        return map(float, lim)

    axis_map = axes._axis_map
    axis_limits = {
        name: tuple(convert_limits(
            getattr(axes, f'get_{name}lim')(), axis.converter
        ))
        for name, axis in axis_map.items()
    }
    general = [
        ('Title', axes.get_title()),
        sep,
        *chain.from_iterable([
            (
                (None, f"<b>{name.title()}-Axis</b>"),
                ('Min', axis_limits[name][0]),
                ('Max', axis_limits[name][1]),
                ('Label', axis.get_label().get_text()),
                ('Scale', [axis.get_scale(),
                           'linear', 'log', 'symlog', 'logit']),
                sep,
            )
            for name, axis in axis_map.items()
        ]),
        ('(Re-)Generate automatic legend', False),
    ]

    # Save the converter and unit data, so they can be restored after the
    # limits are re-applied in apply_callback below.
    axis_converter = {
        name: axis.converter
        for name, axis in axis_map.items()
    }
    axis_units = {
        name: axis.get_units()
        for name, axis in axis_map.items()
    }

    # Get / Curves
    labeled_lines = []
    for line in axes.get_lines():
        label = line.get_label()
        if label == '_nolegend_':
            continue
        labeled_lines.append((label, line))
    curves = []

    def prepare_data(d, init):
        """
        Prepare entry for FormLayout.

        *d* is a mapping of shorthands to style names (a single style may
        have multiple shorthands, in particular the shorthands `None`,
        `"None"`, `"none"` and `""` are synonyms); *init* is one shorthand
        of the initial style.

        This function returns an list suitable for initializing a
        FormLayout combobox, namely `[initial_name, (shorthand,
        style_name), (shorthand, style_name), ...]`.
        """
        if init not in d:
            d = {**d, init: str(init)}
        # Drop duplicate shorthands from dict (by overwriting them during
        # the dict comprehension).
        name2short = {name: short for short, name in d.items()}
        # Convert back to {shorthand: name}.
        short2name = {short: name for name, short in name2short.items()}
        # Find the kept shorthand for the style specified by init.
        canonical_init = name2short[d[init]]
        # Sort by representation and prepend the initial value.
        return ([canonical_init] +
                sorted(short2name.items(),
                       key=lambda short_and_name: short_and_name[1]))

    for label, line in labeled_lines:
        color = mcolors.to_hex(
            mcolors.to_rgba(line.get_color(), line.get_alpha()),
            keep_alpha=True)
        ec = mcolors.to_hex(
            mcolors.to_rgba(line.get_markeredgecolor(), line.get_alpha()),
            keep_alpha=True)
        fc = mcolors.to_hex(
            mcolors.to_rgba(line.get_markerfacecolor(), line.get_alpha()),
            keep_alpha=True)
        curvedata = [
            ('Label', label),
            sep,
            (None, '<b>Line</b>'),
            ('Line style', prepare_data(LINESTYLES, line.get_linestyle())),
            ('Draw style', prepare_data(DRAWSTYLES, line.get_drawstyle())),
            ('Width', line.get_linewidth()),
            ('Color (RGBA)', color),
            sep,
            (None, '<b>Marker</b>'),
            ('Style', prepare_data(MARKERS, line.get_marker())),
            ('Size', line.get_markersize()),
            ('Face color (RGBA)', fc),
            ('Edge color (RGBA)', ec)]
        curves.append([curvedata, label, ""])
    # Is there a curve displayed?
    has_curve = bool(curves)

    # Get ScalarMappables.
    labeled_mappables = []
    for mappable in [*axes.images, *axes.collections]:
        label = mappable.get_label()
        if label == '_nolegend_' or mappable.get_array() is None:
            continue
        labeled_mappables.append((label, mappable))
    mappables = []
    cmaps = [(cmap, name) for name, cmap in sorted(cm._colormaps.items())]
    for label, mappable in labeled_mappables:
        cmap = mappable.get_cmap()
        if cmap not in cm._colormaps.values():
            cmaps = [(cmap, cmap.name), *cmaps]
        low, high = mappable.get_clim()
        mappabledata = [
            ('Label', label),
            ('Colormap', [cmap.name] + cmaps),
            ('Min. value', low),
            ('Max. value', high),
        ]
        if hasattr(mappable, "get_interpolation"):  # Images.
            interpolations = [
                (name, name) for name in sorted(mimage.interpolations_names)]
            mappabledata.append((
                'Interpolation',
                [mappable.get_interpolation(), *interpolations]))
        mappables.append([mappabledata, label, ""])
    # Is there a scalarmappable displayed?
    has_sm = bool(mappables)

    datalist = [(general, "Axes", "")]
    if curves:
        datalist.append((curves, "Curves", ""))
    if mappables:
        datalist.append((mappables, "Images, etc.", ""))

    def apply_callback(data):
        """A callback to apply changes."""
        orig_limits = {
            name: getattr(axes, f"get_{name}lim")()
            for name in axis_map
        }

        general = data.pop(0)
        curves = data.pop(0) if has_curve else []
        mappables = data.pop(0) if has_sm else []
        if data:
            raise ValueError("Unexpected field")

        title = general.pop(0)
        axes.set_title(title)
        generate_legend = general.pop()

        # Each axis contributed 4 editable fields (min, max, label, scale).
        for i, (name, axis) in enumerate(axis_map.items()):
            axis_min = general[4*i]
            axis_max = general[4*i + 1]
            axis_label = general[4*i + 2]
            axis_scale = general[4*i + 3]
            if axis.get_scale() != axis_scale:
                getattr(axes, f"set_{name}scale")(axis_scale)

            axis._set_lim(axis_min, axis_max, auto=False)
            axis.set_label_text(axis_label)

            # Restore the unit data
            axis.converter = axis_converter[name]
            axis.set_units(axis_units[name])

        # Set / Curves
        for index, curve in enumerate(curves):
            line = labeled_lines[index][1]
            (label, linestyle, drawstyle, linewidth, color, marker, markersize,
             markerfacecolor, markeredgecolor) = curve
            line.set_label(label)
            line.set_linestyle(linestyle)
            line.set_drawstyle(drawstyle)
            line.set_linewidth(linewidth)
            rgba = mcolors.to_rgba(color)
            line.set_alpha(None)
            line.set_color(rgba)
            if marker != 'none':
                line.set_marker(marker)
                line.set_markersize(markersize)
                line.set_markerfacecolor(markerfacecolor)
                line.set_markeredgecolor(markeredgecolor)

        # Set ScalarMappables.
        for index, mappable_settings in enumerate(mappables):
            mappable = labeled_mappables[index][1]
            if len(mappable_settings) == 5:
                label, cmap, low, high, interpolation = mappable_settings
                mappable.set_interpolation(interpolation)
            elif len(mappable_settings) == 4:
                label, cmap, low, high = mappable_settings
            mappable.set_label(label)
            mappable.set_cmap(cm.get_cmap(cmap))
            mappable.set_clim(*sorted([low, high]))

        # re-generate legend, if checkbox is checked
        if generate_legend:
            draggable = None
            ncols = 1
            if axes.legend_ is not None:
                old_legend = axes.get_legend()
                draggable = old_legend._draggable is not None
                ncols = old_legend._ncols
            new_legend = axes.legend(ncols=ncols)
            if new_legend:
                new_legend.set_draggable(draggable)

        # Redraw
        figure = axes.get_figure()
        figure.canvas.draw()
        # If any limit changed, record the new view state on the toolbar's
        # navigation stack.
        for name in axis_map:
            if getattr(axes, f"get_{name}lim")() != orig_limits[name]:
                figure.canvas.toolbar.push_current()
                break

    _formlayout.fedit(
        datalist, title="Figure options", parent=parent,
        icon=QtGui.QIcon(
            str(cbook._get_data_path('images', 'qt4_editor_options.svg'))),
        apply=apply_callback)
171,319 | import asyncio
import datetime
from io import BytesIO, StringIO
import json
import logging
import os
from pathlib import Path
import numpy as np
from PIL import Image
from matplotlib import _api, backend_bases, backend_tools
from matplotlib.backends import backend_agg
from matplotlib.backend_bases import (
_Backend, KeyEvent, LocationEvent, MouseEvent, ResizeEvent)
# Map browser KeyboardEvent key names (as sent by the web client) to
# Matplotlib's key-name conventions.
_SPECIAL_KEYS_LUT = {'Alt': 'alt',
                     'AltGraph': 'alt',
                     'CapsLock': 'caps_lock',
                     'Control': 'control',
                     'Meta': 'meta',
                     'NumLock': 'num_lock',
                     'ScrollLock': 'scroll_lock',
                     'Shift': 'shift',
                     'Super': 'super',
                     'Enter': 'enter',
                     'Tab': 'tab',
                     'ArrowDown': 'down',
                     'ArrowLeft': 'left',
                     'ArrowRight': 'right',
                     'ArrowUp': 'up',
                     'End': 'end',
                     'Home': 'home',
                     'PageDown': 'pagedown',
                     'PageUp': 'pageup',
                     'Backspace': 'backspace',
                     'Delete': 'delete',
                     'Insert': 'insert',
                     'Escape': 'escape',
                     'Pause': 'pause',
                     'Select': 'select',
                     'Dead': 'dead',
                     'F1': 'f1',
                     'F2': 'f2',
                     'F3': 'f3',
                     'F4': 'f4',
                     'F5': 'f5',
                     'F6': 'f6',
                     'F7': 'f7',
                     'F8': 'f8',
                     'F9': 'f9',
                     'F10': 'f10',
                     'F11': 'f11',
                     'F12': 'f12'}
The provided code snippet includes necessary dependencies for implementing the `_handle_key` function. Write a Python function `def _handle_key(key)` to solve the following problem:
Handle key values
Here is the function:
def _handle_key(key):
    """Normalize a key string sent by the web client."""
    # Everything after the 'k' marker is the key value itself.
    value = key[key.index('k') + 1:]
    # A single shifted character already encodes the shift, so the
    # explicit 'shift+' modifier is redundant for those.
    if 'shift+' in key and len(value) == 1:
        key = key.replace('shift+', '')
    # Translate browser key names to Matplotlib's conventions.
    if value in _SPECIAL_KEYS_LUT:
        value = _SPECIAL_KEYS_LUT[value]
    return key[:key.index('k')] + value
171,320 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _escape_cdata(s):
def escape_cdata(s):
return _escape_cdata(s) | null |
171,321 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _escape_comment(s):
    """Escape *s* for embedding in an XML comment ('--' is not allowed)."""
    s = _escape_cdata(s)
    # Break up any '--' runs, which are invalid inside XML comments.
    return _escape_xml_comment.sub('- ', s)


def escape_comment(s):
    """Public wrapper around `_escape_comment`."""
    # Fix: the original called `_escape_comment.sub(s)` — .sub on a plain
    # function — which would raise AttributeError; just call the helper.
    return _escape_comment(s)
171,322 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _escape_attrib(s):
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace('"', """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def escape_attrib(s):
return _escape_attrib(s) | null |
171,323 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _escape_cdata(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def _escape_attrib(s):
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace('"', """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def _quote_escape_attrib(s):
return ('"' + _escape_cdata(s) + '"' if '"' not in s else
"'" + _escape_cdata(s) + "'" if "'" not in s else
'"' + _escape_attrib(s) + '"') | null |
171,324 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _short_float_fmt(x):
    """
    Create a short string representation of a float: ``%f`` formatting with
    trailing zeros and, if possible, the trailing decimal point removed.
    """
    # The original body was lost (the bare ``def`` line is a SyntaxError);
    # restored from the matplotlib SVG backend implementation.
    return '{0:f}'.format(x).rstrip('0').rstrip('.')


def short_float_fmt(x):
    """Public wrapper around `_short_float_fmt`."""
    return _short_float_fmt(x)
171,325 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _generate_transform(transform_list):
    """Serialize *transform_list* into an SVG ``transform`` attribute value."""
    out = []
    for kind, value in transform_list:
        # Drop identity operations, which would only bloat the output.
        is_identity = (
            (kind == 'scale' and (value == (1,) or value == (1, 1)))
            or (kind == 'translate' and value == (0, 0))
            or (kind == 'rotate' and value == (0,)))
        if is_identity:
            continue
        if kind == 'matrix' and isinstance(value, Affine2DBase):
            value = value.to_values()
        args = ' '.join(_short_float_fmt(v) for v in value)
        out.append('%s(%s)' % (kind, args))
    return ' '.join(out)


def generate_transform(transform_list=None):
    """Public wrapper around `_generate_transform`; *None* means no transforms."""
    return _generate_transform(transform_list or [])
171,326 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _generate_css(attrib):
return "; ".join(f"{k}: {v}" for k, v in attrib.items())
def generate_css(attrib=None):
return _generate_css(attrib or {}) | null |
171,327 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _check_is_str(info, key):
if not isinstance(info, str):
raise TypeError(f'Invalid type for {key} metadata. Expected str, not '
f'{type(info)}.') | null |
171,328 | import base64
import codecs
import datetime
import gzip
import hashlib
from io import BytesIO
import itertools
import logging
import os
import re
import uuid
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.dates import UTC
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
def _check_is_iterable_of_str(infos, key):
if np.iterable(infos):
for info in infos:
if not isinstance(info, str):
raise TypeError(f'Invalid type for {key} metadata. Expected '
f'iterable of str, not {type(info)}.')
else:
raise TypeError(f'Invalid type for {key} metadata. Expected str or '
f'iterable of str, not {type(infos)}.') | null |
171,329 | import functools
import os
import sys
import traceback
import matplotlib as mpl
from matplotlib import _api, backend_tools, cbook
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, cursors, ToolContainerBase, MouseButton,
CloseEvent, KeyEvent, LocationEvent, MouseEvent, ResizeEvent)
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from . import qt_compat
from .qt_compat import (
QtCore, QtGui, QtWidgets, __version__, QT_API,
_enum, _to_int, _isdeleted, _maybe_allow_interrupt
)
# Select and import the Qt binding matching QT_API.
# NOTE(review): the QT_API_* constants, _QT_FORCE_QT5_BINDING,
# _setup_pyqt5plus and _ETS are defined earlier in the qt_compat module this
# block was extracted from; they are not visible here.
if QT_API in [QT_API_PYQT6, QT_API_PYQT5, QT_API_PYSIDE6, QT_API_PYSIDE2]:
    # A specific binding was requested; import it (failure propagates).
    _setup_pyqt5plus()
elif QT_API is None:  # See above re: dict.__getitem__.
    if _QT_FORCE_QT5_BINDING:
        _candidates = [
            (_setup_pyqt5plus, QT_API_PYQT5),
            (_setup_pyqt5plus, QT_API_PYSIDE2),
        ]
    else:
        # Prefer Qt6 bindings, then fall back to Qt5.
        _candidates = [
            (_setup_pyqt5plus, QT_API_PYQT6),
            (_setup_pyqt5plus, QT_API_PYSIDE6),
            (_setup_pyqt5plus, QT_API_PYQT5),
            (_setup_pyqt5plus, QT_API_PYSIDE2),
        ]
    # Try candidates in order; the first importable binding wins and QT_API
    # is rebound to its name.
    for _setup, QT_API in _candidates:
        try:
            _setup()
        except ImportError:
            continue
        break
    else:
        # The for-else fires when no candidate imported successfully.
        raise ImportError(
            "Failed to import any of the following Qt binding modules: {}"
            .format(", ".join(_ETS.values())))
else:  # We should not get there.
    raise AssertionError(f"Unexpected QT_API: {QT_API}")
def _create_qApp():
    """
    Return the global QApplication, creating and configuring a new instance
    first if none exists yet.

    Raises
    ------
    RuntimeError
        If no X11/Wayland display can be opened (Linux only).
    """
    app = QtWidgets.QApplication.instance()
    # Create a new QApplication and configure it if none exists yet, as only
    # one QApplication can exist at a time.
    if app is None:
        # display_is_valid returns False only if on Linux and neither X11
        # nor Wayland display can be opened.
        if not mpl._c_internal_utils.display_is_valid():
            raise RuntimeError('Invalid DISPLAY variable')
        # Check to make sure a QApplication from a different major version
        # of Qt is not instantiated in the process
        if QT_API in {'PyQt6', 'PySide6'}:
            other_bindings = ('PyQt5', 'PySide2')
        elif QT_API in {'PyQt5', 'PySide2'}:
            other_bindings = ('PyQt6', 'PySide6')
        else:
            raise RuntimeError("Should never be here")
        for binding in other_bindings:
            mod = sys.modules.get(f'{binding}.QtWidgets')
            if mod is not None and mod.QApplication.instance() is not None:
                other_core = sys.modules.get(f'{binding}.QtCore')
                _api.warn_external(
                    f'Matplotlib is using {QT_API} which wraps '
                    f'{QtCore.qVersion()} however an instantiated '
                    f'QApplication from {binding} which wraps '
                    f'{other_core.qVersion()} exists. Mixing Qt major '
                    'versions may not work as expected.'
                )
                break
        # High-DPI configuration; each call is a no-op (AttributeError) on Qt
        # versions that do not provide the attribute.
        try:
            QtWidgets.QApplication.setAttribute(
                QtCore.Qt.AA_EnableHighDpiScaling)
        except AttributeError:  # Only for Qt>=5.6, <6.
            pass
        try:
            QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
                QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)
        except AttributeError:  # Only for Qt>=5.14.
            pass
        app = QtWidgets.QApplication(["matplotlib"])
        if sys.platform == "darwin":
            image = str(cbook._get_data_path('images/matplotlib.svg'))
            icon = QtGui.QIcon(image)
            app.setWindowIcon(icon)
        app.lastWindowClosed.connect(app.quit)
        cbook._setup_new_guiapp()
    try:
        app.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)  # Only for Qt<6.
    except AttributeError:
        pass
    return app
171,330 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
def _get_preamble():
    """Prepare a LaTeX preamble based on the rcParams configuration."""
    lines = [mpl.rcParams["pgf.preamble"]]
    if mpl.rcParams["pgf.texsystem"] != "pdflatex":
        # Only xelatex/lualatex can load system fonts via fontspec.
        lines.append("\\usepackage{fontspec}")
        if mpl.rcParams["pgf.rcfonts"]:
            for family, command in [("serif", "setmainfont"),
                                    ("sans\\-serif", "setsansfont"),
                                    ("monospace", "setmonofont")]:
                # 1) Forward slashes also work on Windows, so don't mess with
                # backslashes. 2) The dirname needs to include a separator.
                path = pathlib.Path(fm.findfont(family))
                lines.append(r"\%s{%s}[Path=\detokenize{%s/}]" % (
                    command, path.name, path.parent.as_posix()))
    lines.append(mpl.texmanager._usepackage_if_not_loaded(
        "underscore", option="strings"))  # Documented as "must come last".
    return "\n".join(lines)
The provided code snippet includes necessary dependencies for implementing the `get_fontspec` function. Write a Python function `def get_fontspec()` to solve the following problem:
Build fontspec preamble from rc.
Here is the function:
def get_fontspec():
    """Build fontspec preamble from rc."""
    # Temporarily blank out the user preamble so that only the font-setup
    # lines generated by _get_preamble() are returned.
    with mpl.rc_context({"pgf.preamble": ""}):
        return _get_preamble()
171,331 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
The provided code snippet includes necessary dependencies for implementing the `get_preamble` function. Write a Python function `def get_preamble()` to solve the following problem:
Get LaTeX preamble from rc.
Here is the function:
def get_preamble():
    """Get LaTeX preamble from rc."""
    # Thin accessor over the rcParams entry, kept for API compatibility.
    return mpl.rcParams["pgf.preamble"]
171,332 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
def _tex_escape(text):
    r"""
    Do some necessary and/or useful substitutions for texts to be included in
    LaTeX documents.

    This distinguishes text-mode and math-mode by replacing the math separator
    ``$`` with ``\(\displaystyle %s\)``.  Escaped math separators (``\$``)
    are ignored.

    The following characters are escaped in text segments: ``^%``
    """
    # \mathdefault is a matplotlib-internal command unknown to LaTeX; drop it
    # rather than mapping to \mathnormal, which renders oddly with cm fonts.
    text = _replace_mathdefault(text)
    text = text.replace("\N{MINUS SIGN}", r"\ensuremath{-}")
    # Even-indexed chunks are plain text, odd-indexed chunks are inline math.
    chunks = _split_math(text)
    converted = [
        _replace_escapetext(chunk) if index % 2 == 0
        else r"\(\displaystyle %s\)" % chunk
        for index, chunk in enumerate(chunks)]
    return "".join(converted)
def common_texification(text):
    # Public alias of _tex_escape, kept for backward compatibility.
    return _tex_escape(text)
171,333 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
def _writeln(fh, line):
    """Write *line* to *fh*, terminated by ``%`` and a newline.

    The trailing % prevents TeX from inserting spurious spaces
    (https://tex.stackexchange.com/questions/7453).
    """
    fh.write(f"{line}%\n")


def writeln(fh, line):
    """Public wrapper around `_writeln`."""
    return _writeln(fh, line)
171,334 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
_log = logging.getLogger(__name__)
def _tex_escape(text):
    r"""
    Do some necessary and/or useful substitutions for texts to be included in
    LaTeX documents.
    This distinguishes text-mode and math-mode by replacing the math separator
    ``$`` with ``\(\displaystyle %s\)``. Escaped math separators (``\$``)
    are ignored.
    The following characters are escaped in text segments: ``^%``
    """
    # Sometimes, matplotlib adds the unknown command \mathdefault.
    # Not using \mathnormal instead since this looks odd for the latex cm font.
    text = _replace_mathdefault(text)
    text = text.replace("\N{MINUS SIGN}", r"\ensuremath{-}")
    # split text into normaltext and inline math parts
    # (even-indexed parts are text-mode, odd-indexed parts are math-mode)
    parts = _split_math(text)
    for i, s in enumerate(parts):
        if not i % 2:
            # textmode replacements
            s = _replace_escapetext(s)
        else:
            # mathmode replacements: wrap in inline-math delimiters
            s = r"\(\displaystyle %s\)" % s
        parts[i] = s
    return "".join(parts)
The provided code snippet includes necessary dependencies for implementing the `_escape_and_apply_props` function. Write a Python function `def _escape_and_apply_props(s, prop)` to solve the following problem:
Generate a TeX string that renders string *s* with font properties *prop*, also applying any required escapes to *s*.
Here is the function:
def _escape_and_apply_props(s, prop):
    """
    Generate a TeX string that renders string *s* with font properties *prop*,
    also applying any required escapes to *s*.
    """
    commands = []
    # Font family: either one of the generic LaTeX family switches, or (for
    # xelatex/lualatex only) a named installed font loaded via fontspec.
    families = {"serif": r"\rmfamily", "sans": r"\sffamily",
                "sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
    family = prop.get_family()[0]
    if family in families:
        commands.append(families[family])
    elif (any(font.name == family for font in fm.fontManager.ttflist)
          and mpl.rcParams["pgf.texsystem"] != "pdflatex"):
        commands.append(r"\setmainfont{%s}\rmfamily" % family)
    else:
        # Unknown family: keep going with the document default font.
        _log.warning("Ignoring unknown font: %s", family)
    size = prop.get_size_in_points()
    # Second \fontsize argument is the baselineskip (1.2x the font size).
    commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
    styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
    commands.append(styles[prop.get_style()])
    boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
                  "extra bold", "black"]
    if prop.get_weight() in boldstyles:
        commands.append(r"\bfseries")
    # \selectfont actually activates the accumulated font attributes.
    commands.append(r"\selectfont")
    return "".join(commands) + " " + _tex_escape(s)
171,335 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
def _datetime_to_pdf(d):
"""
Convert a datetime to a PDF string representing it.
Used for PDF and PGF.
"""
r = d.strftime('D:%Y%m%d%H%M%S')
z = d.utcoffset()
if z is not None:
z = z.seconds
else:
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return r
The provided code snippet includes necessary dependencies for implementing the `_metadata_to_str` function. Write a Python function `def _metadata_to_str(key, value)` to solve the following problem:
Convert metadata key/value to a form that hyperref accepts.
Here is the function:
def _metadata_to_str(key, value):
"""Convert metadata key/value to a form that hyperref accepts."""
if isinstance(value, datetime.datetime):
value = _datetime_to_pdf(value)
elif key == 'Trapped':
value = value.name.decode('ascii')
else:
value = str(value)
return f'{key}={{{value}}}' | Convert metadata key/value to a form that hyperref accepts. |
171,336 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
The provided code snippet includes necessary dependencies for implementing the `make_pdf_to_png_converter` function. Write a Python function `def make_pdf_to_png_converter()` to solve the following problem:
Return a function that converts a pdf file to a png file.
Here is the function:
def make_pdf_to_png_converter():
    """Return a function that converts a pdf file to a png file."""
    # Prefer pdftocairo (poppler) when available.
    try:
        mpl._get_executable_info("pdftocairo")
    except mpl.ExecutableNotFoundError:
        pass
    else:
        def cairo_convert(pdffile, pngfile, dpi):
            cmd = ["pdftocairo", "-singlefile", "-transp", "-png",
                   "-r", "%d" % dpi, pdffile, os.path.splitext(pngfile)[0]]
            return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return cairo_convert
    # Otherwise fall back to Ghostscript.
    try:
        gs_info = mpl._get_executable_info("gs")
    except mpl.ExecutableNotFoundError:
        pass
    else:
        def gs_convert(pdffile, pngfile, dpi):
            cmd = [gs_info.executable,
                   '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
                   '-dUseCIEColor', '-dTextAlphaBits=4',
                   '-dGraphicsAlphaBits=4', '-dDOINTERPOLATE',
                   '-sDEVICE=pngalpha', '-sOutputFile=%s' % pngfile,
                   '-r%d' % dpi, pdffile]
            return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return gs_convert
    raise RuntimeError("No suitable pdf to png renderer found.")
171,337 | import codecs
import datetime
import functools
from io import BytesIO
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
from tempfile import TemporaryDirectory
import weakref
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, cbook, font_manager as fm
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase
)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.backends.backend_pdf import (
_create_pdf_info_dict, _datetime_to_pdf)
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
# NOTE(review): extraction dropped the bodies of LatexError's methods and of
# most LatexManager methods; only the signatures survive, so this block is
# not currently valid Python.  Comments document the visible structure only.
class LatexError(Exception):
    def __init__(self, message, latex_output=""):
    def __str__(self):
# LatexManager apparently drives a persistent interactive LaTeX subprocess
# (stdin writes + prompt expectation) — TODO confirm against the original
# backend_pgf source.
class LatexManager:
    def _build_latex_header():
    def _get_cached_or_new(cls):
    def _get_cached_or_new_impl(cls, header):
    def _stdin_writeln(self, s):
    def _expect(self, s):
    def _expect_prompt(self):
    def __init__(self):
    def _setup_latex_process(self, *, expect_reply=True):
    def finalize_latex(latex):
    def get_width_height_descent(self, text, prop):
    def _get_box_metrics(self, tex):
    # Only surviving body: probe whether the running LaTeX process supports
    # \includegraphics; fall back to \pgfimage otherwise.
    def _get_image_inclusion_command():
        man = LatexManager._get_cached_or_new()
        man._stdin_writeln(
            r"\includegraphics[interpolate=true]{%s}"
            # Don't mess with backslashes on Windows.
            % cbook._get_data_path("images/matplotlib.png").as_posix())
        try:
            man._expect_prompt()
            return r"\includegraphics"
        except LatexError:
            # Discard the broken manager.
            LatexManager._get_cached_or_new_impl.cache_clear()
            return r"\pgfimage"
171,338 | import copy
from collections import namedtuple
import enum
import functools
import logging
import os
import re
import types
import unicodedata
import numpy as np
from pyparsing import (
Empty, Forward, Literal, NotAny, oneOf, OneOrMore, Optional,
ParseBaseException, ParseException, ParseExpression, ParseFatalException,
ParserElement, ParseResults, QuotedString, Regex, StringEnd, ZeroOrMore,
pyparsing_common)
import matplotlib as mpl
from . import _api, cbook
from ._mathtext_data import (
latex_to_bakoma, stix_glyph_fixes, stix_virtual_fonts, tex2uni)
from .font_manager import FontProperties, findfont, get_font
from .ft2font import FT2Image, KERNING_DEFAULT
tex2uni = {
'widehat' : 0x0302,
'widetilde' : 0x0303,
'widebar' : 0x0305,
'langle' : 0x27e8,
'rangle' : 0x27e9,
'perp' : 0x27c2,
'neq' : 0x2260,
'Join' : 0x2a1d,
'leqslant' : 0x2a7d,
'geqslant' : 0x2a7e,
'lessapprox' : 0x2a85,
'gtrapprox' : 0x2a86,
'lesseqqgtr' : 0x2a8b,
'gtreqqless' : 0x2a8c,
'triangleeq' : 0x225c,
'eqslantless' : 0x2a95,
'eqslantgtr' : 0x2a96,
'backepsilon' : 0x03f6,
'precapprox' : 0x2ab7,
'succapprox' : 0x2ab8,
'fallingdotseq' : 0x2252,
'subseteqq' : 0x2ac5,
'supseteqq' : 0x2ac6,
'varpropto' : 0x221d,
'precnapprox' : 0x2ab9,
'succnapprox' : 0x2aba,
'subsetneqq' : 0x2acb,
'supsetneqq' : 0x2acc,
'lnapprox' : 0x2ab9,
'gnapprox' : 0x2aba,
'longleftarrow' : 0x27f5,
'longrightarrow' : 0x27f6,
'longleftrightarrow' : 0x27f7,
'Longleftarrow' : 0x27f8,
'Longrightarrow' : 0x27f9,
'Longleftrightarrow' : 0x27fa,
'longmapsto' : 0x27fc,
'leadsto' : 0x21dd,
'dashleftarrow' : 0x290e,
'dashrightarrow' : 0x290f,
'circlearrowleft' : 0x21ba,
'circlearrowright' : 0x21bb,
'leftrightsquigarrow' : 0x21ad,
'leftsquigarrow' : 0x219c,
'rightsquigarrow' : 0x219d,
'Game' : 0x2141,
'hbar' : 0x0127,
'hslash' : 0x210f,
'ldots' : 0x2026,
'vdots' : 0x22ee,
'doteqdot' : 0x2251,
'doteq' : 8784,
'partial' : 8706,
'gg' : 8811,
'asymp' : 8781,
'blacktriangledown' : 9662,
'otimes' : 8855,
'nearrow' : 8599,
'varpi' : 982,
'vee' : 8744,
'vec' : 8407,
'smile' : 8995,
'succnsim' : 8937,
'gimel' : 8503,
'vert' : 124,
'|' : 8214,
'varrho' : 1009,
'P' : 182,
'approxident' : 8779,
'Swarrow' : 8665,
'textasciicircum' : 94,
'imageof' : 8887,
'ntriangleleft' : 8938,
'nleq' : 8816,
'div' : 247,
'nparallel' : 8742,
'Leftarrow' : 8656,
'lll' : 8920,
'oiint' : 8751,
'ngeq' : 8817,
'Theta' : 920,
'origof' : 8886,
'blacksquare' : 9632,
'solbar' : 9023,
'neg' : 172,
'sum' : 8721,
'Vdash' : 8873,
'coloneq' : 8788,
'degree' : 176,
'bowtie' : 8904,
'blacktriangleright' : 9654,
'varsigma' : 962,
'leq' : 8804,
'ggg' : 8921,
'lneqq' : 8808,
'scurel' : 8881,
'stareq' : 8795,
'BbbN' : 8469,
'nLeftarrow' : 8653,
'nLeftrightarrow' : 8654,
'k' : 808,
'bot' : 8869,
'BbbC' : 8450,
'Lsh' : 8624,
'leftleftarrows' : 8647,
'BbbZ' : 8484,
'digamma' : 989,
'BbbR' : 8477,
'BbbP' : 8473,
'BbbQ' : 8474,
'vartriangleright' : 8883,
'succsim' : 8831,
'wedge' : 8743,
'lessgtr' : 8822,
'veebar' : 8891,
'mapsdown' : 8615,
'Rsh' : 8625,
'chi' : 967,
'prec' : 8826,
'nsubseteq' : 8840,
'therefore' : 8756,
'eqcirc' : 8790,
'textexclamdown' : 161,
'nRightarrow' : 8655,
'flat' : 9837,
'notin' : 8713,
'llcorner' : 8990,
'varepsilon' : 949,
'bigtriangleup' : 9651,
'aleph' : 8501,
'dotminus' : 8760,
'upsilon' : 965,
'Lambda' : 923,
'cap' : 8745,
'barleftarrow' : 8676,
'mu' : 956,
'boxplus' : 8862,
'mp' : 8723,
'circledast' : 8859,
'tau' : 964,
'in' : 8712,
'backslash' : 92,
'varnothing' : 8709,
'sharp' : 9839,
'eqsim' : 8770,
'gnsim' : 8935,
'Searrow' : 8664,
'updownarrows' : 8645,
'heartsuit' : 9825,
'trianglelefteq' : 8884,
'ddag' : 8225,
'sqsubseteq' : 8849,
'mapsfrom' : 8612,
'boxbar' : 9707,
'sim' : 8764,
'Nwarrow' : 8662,
'nequiv' : 8802,
'succ' : 8827,
'vdash' : 8866,
'Leftrightarrow' : 8660,
'parallel' : 8741,
'invnot' : 8976,
'natural' : 9838,
'ss' : 223,
'uparrow' : 8593,
'nsim' : 8769,
'hookrightarrow' : 8618,
'Equiv' : 8803,
'approx' : 8776,
'Vvdash' : 8874,
'nsucc' : 8833,
'leftrightharpoons' : 8651,
'Re' : 8476,
'boxminus' : 8863,
'equiv' : 8801,
'Lleftarrow' : 8666,
'll' : 8810,
'Cup' : 8915,
'measeq' : 8798,
'upharpoonleft' : 8639,
'lq' : 8216,
'Upsilon' : 933,
'subsetneq' : 8842,
'greater' : 62,
'supsetneq' : 8843,
'Cap' : 8914,
'L' : 321,
'spadesuit' : 9824,
'lrcorner' : 8991,
'not' : 824,
'bar' : 772,
'rightharpoonaccent' : 8401,
'boxdot' : 8865,
'l' : 322,
'leftharpoondown' : 8637,
'bigcup' : 8899,
'iint' : 8748,
'bigwedge' : 8896,
'downharpoonleft' : 8643,
'textasciitilde' : 126,
'subset' : 8834,
'leqq' : 8806,
'mapsup' : 8613,
'nvDash' : 8877,
'looparrowleft' : 8619,
'nless' : 8814,
'rightarrowbar' : 8677,
'Vert' : 8214,
'downdownarrows' : 8650,
'uplus' : 8846,
'simeq' : 8771,
'napprox' : 8777,
'ast' : 8727,
'twoheaduparrow' : 8607,
'doublebarwedge' : 8966,
'Sigma' : 931,
'leftharpoonaccent' : 8400,
'ntrianglelefteq' : 8940,
'nexists' : 8708,
'times' : 215,
'measuredangle' : 8737,
'bumpeq' : 8783,
'carriagereturn' : 8629,
'adots' : 8944,
'checkmark' : 10003,
'lambda' : 955,
'xi' : 958,
'rbrace' : 125,
'rbrack' : 93,
'Nearrow' : 8663,
'maltese' : 10016,
'clubsuit' : 9827,
'top' : 8868,
'overarc' : 785,
'varphi' : 966,
'Delta' : 916,
'iota' : 953,
'nleftarrow' : 8602,
'candra' : 784,
'supset' : 8835,
'triangleleft' : 9665,
'gtreqless' : 8923,
'ntrianglerighteq' : 8941,
'quad' : 8195,
'Xi' : 926,
'gtrdot' : 8919,
'leftthreetimes' : 8907,
'minus' : 8722,
'preccurlyeq' : 8828,
'nleftrightarrow' : 8622,
'lambdabar' : 411,
'blacktriangle' : 9652,
'kernelcontraction' : 8763,
'Phi' : 934,
'angle' : 8736,
'spadesuitopen' : 9828,
'eqless' : 8924,
'mid' : 8739,
'varkappa' : 1008,
'Ldsh' : 8626,
'updownarrow' : 8597,
'beta' : 946,
'textquotedblleft' : 8220,
'rho' : 961,
'alpha' : 945,
'intercal' : 8890,
'beth' : 8502,
'grave' : 768,
'acwopencirclearrow' : 8634,
'nmid' : 8740,
'nsupset' : 8837,
'sigma' : 963,
'dot' : 775,
'Rightarrow' : 8658,
'turnednot' : 8985,
'backsimeq' : 8909,
'leftarrowtail' : 8610,
'approxeq' : 8778,
'curlyeqsucc' : 8927,
'rightarrowtail' : 8611,
'Psi' : 936,
'copyright' : 169,
'yen' : 165,
'vartriangleleft' : 8882,
'rasp' : 700,
'triangleright' : 9655,
'precsim' : 8830,
'infty' : 8734,
'geq' : 8805,
'updownarrowbar' : 8616,
'precnsim' : 8936,
'H' : 779,
'ulcorner' : 8988,
'looparrowright' : 8620,
'ncong' : 8775,
'downarrow' : 8595,
'circeq' : 8791,
'subseteq' : 8838,
'bigstar' : 9733,
'prime' : 8242,
'lceil' : 8968,
'Rrightarrow' : 8667,
'oiiint' : 8752,
'curlywedge' : 8911,
'vDash' : 8872,
'lfloor' : 8970,
'ddots' : 8945,
'exists' : 8707,
'underbar' : 817,
'Pi' : 928,
'leftrightarrows' : 8646,
'sphericalangle' : 8738,
'coprod' : 8720,
'circledcirc' : 8858,
'gtrsim' : 8819,
'gneqq' : 8809,
'between' : 8812,
'theta' : 952,
'complement' : 8705,
'arceq' : 8792,
'nVdash' : 8878,
'S' : 167,
'wr' : 8768,
'wp' : 8472,
'backcong' : 8780,
'lasp' : 701,
'c' : 807,
'nabla' : 8711,
'dotplus' : 8724,
'eta' : 951,
'forall' : 8704,
'eth' : 240,
'colon' : 58,
'sqcup' : 8852,
'rightrightarrows' : 8649,
'sqsupset' : 8848,
'mapsto' : 8614,
'bigtriangledown' : 9661,
'sqsupseteq' : 8850,
'propto' : 8733,
'pi' : 960,
'pm' : 177,
'dots' : 0x2026,
'nrightarrow' : 8603,
'textasciiacute' : 180,
'Doteq' : 8785,
'breve' : 774,
'sqcap' : 8851,
'twoheadrightarrow' : 8608,
'kappa' : 954,
'vartriangle' : 9653,
'diamondsuit' : 9826,
'pitchfork' : 8916,
'blacktriangleleft' : 9664,
'nprec' : 8832,
'curvearrowright' : 8631,
'barwedge' : 8892,
'multimap' : 8888,
'textquestiondown' : 191,
'cong' : 8773,
'rtimes' : 8906,
'rightzigzagarrow' : 8669,
'rightarrow' : 8594,
'leftarrow' : 8592,
'__sqrt__' : 8730,
'twoheaddownarrow' : 8609,
'oint' : 8750,
'bigvee' : 8897,
'eqdef' : 8797,
'sterling' : 163,
'phi' : 981,
'Updownarrow' : 8661,
'backprime' : 8245,
'emdash' : 8212,
'Gamma' : 915,
'i' : 305,
'rceil' : 8969,
'leftharpoonup' : 8636,
'Im' : 8465,
'curvearrowleft' : 8630,
'wedgeq' : 8793,
'curlyeqprec' : 8926,
'questeq' : 8799,
'less' : 60,
'upuparrows' : 8648,
'tilde' : 771,
'textasciigrave' : 96,
'smallsetminus' : 8726,
'ell' : 8467,
'cup' : 8746,
'danger' : 9761,
'nVDash' : 8879,
'cdotp' : 183,
'cdots' : 8943,
'hat' : 770,
'eqgtr' : 8925,
'psi' : 968,
'frown' : 8994,
'acute' : 769,
'downzigzagarrow' : 8623,
'ntriangleright' : 8939,
'cupdot' : 8845,
'circleddash' : 8861,
'oslash' : 8856,
'mho' : 8487,
'd' : 803,
'sqsubset' : 8847,
'cdot' : 8901,
'Omega' : 937,
'OE' : 338,
'veeeq' : 8794,
'Finv' : 8498,
't' : 865,
'leftrightarrow' : 8596,
'swarrow' : 8601,
'rightthreetimes' : 8908,
'rightleftharpoons' : 8652,
'lesssim' : 8818,
'searrow' : 8600,
'because' : 8757,
'gtrless' : 8823,
'star' : 8902,
'nsubset' : 8836,
'zeta' : 950,
'dddot' : 8411,
'bigcirc' : 9675,
'Supset' : 8913,
'circ' : 8728,
'slash' : 8725,
'ocirc' : 778,
'prod' : 8719,
'twoheadleftarrow' : 8606,
'daleth' : 8504,
'upharpoonright' : 8638,
'odot' : 8857,
'Uparrow' : 8657,
'O' : 216,
'hookleftarrow' : 8617,
'trianglerighteq' : 8885,
'nsime' : 8772,
'oe' : 339,
'nwarrow' : 8598,
'o' : 248,
'ddddot' : 8412,
'downharpoonright' : 8642,
'succcurlyeq' : 8829,
'gamma' : 947,
'scrR' : 8475,
'dag' : 8224,
'thickspace' : 8197,
'frakZ' : 8488,
'lessdot' : 8918,
'triangledown' : 9663,
'ltimes' : 8905,
'scrB' : 8492,
'endash' : 8211,
'scrE' : 8496,
'scrF' : 8497,
'scrH' : 8459,
'scrI' : 8464,
'rightharpoondown' : 8641,
'scrL' : 8466,
'scrM' : 8499,
'frakC' : 8493,
'nsupseteq' : 8841,
'circledR' : 174,
'circledS' : 9416,
'ngtr' : 8815,
'bigcap' : 8898,
'scre' : 8495,
'Downarrow' : 8659,
'scrg' : 8458,
'overleftrightarrow' : 8417,
'scro' : 8500,
'lnsim' : 8934,
'eqcolon' : 8789,
'curlyvee' : 8910,
'urcorner' : 8989,
'lbrace' : 123,
'Bumpeq' : 8782,
'delta' : 948,
'boxtimes' : 8864,
'overleftarrow' : 8406,
'prurel' : 8880,
'clubsuitopen' : 9831,
'cwopencirclearrow' : 8635,
'geqq' : 8807,
'rightleftarrows' : 8644,
'aa' : 229,
'ac' : 8766,
'ae' : 230,
'int' : 8747,
'rfloor' : 8971,
'risingdotseq' : 8787,
'nvdash' : 8876,
'diamond' : 8900,
'ddot' : 776,
'backsim' : 8765,
'oplus' : 8853,
'triangleq' : 8796,
'check' : 780,
'ni' : 8715,
'iiint' : 8749,
'ne' : 8800,
'lesseqgtr' : 8922,
'obar' : 9021,
'supseteq' : 8839,
'nu' : 957,
'AA' : 197,
'AE' : 198,
'models' : 8871,
'ominus' : 8854,
'dashv' : 8867,
'omega' : 969,
'rq' : 8217,
'Subset' : 8912,
'rightharpoonup' : 8640,
'Rdsh' : 8627,
'bullet' : 8729,
'divideontimes' : 8903,
'lbrack' : 91,
'textquotedblright' : 8221,
'Colon' : 8759,
'%' : 37,
'$' : 36,
'{' : 123,
'}' : 125,
'_' : 95,
'#' : 35,
'imath' : 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'combiningthreedotsabove' : 8411,
'combiningfourdotsabove' : 8412,
'to' : 8594,
'succeq' : 8829,
'emptyset' : 8709,
'leftparen' : 40,
'rightparen' : 41,
'bigoplus' : 10753,
'leftangle' : 10216,
'rightangle' : 10217,
'leftbrace' : 124,
'rightbrace' : 125,
'jmath' : 567,
'bigodot' : 10752,
'preceq' : 8828,
'biguplus' : 10756,
'epsilon' : 949,
'vartheta' : 977,
'bigotimes' : 10754,
'guillemotleft' : 171,
'ring' : 730,
'Thorn' : 222,
'guilsinglright' : 8250,
'perthousand' : 8240,
'macron' : 175,
'cent' : 162,
'guillemotright' : 187,
'equal' : 61,
'asterisk' : 42,
'guilsinglleft' : 8249,
'plus' : 43,
'thorn' : 254,
'dagger' : 8224
}
The provided code snippet includes necessary dependencies for implementing the `get_unicode_index` function. Write a Python function `def get_unicode_index(symbol, math=False)` to solve the following problem:
Return the integer index (from the Unicode table) of *symbol*, where *symbol* is a single (Unicode) character, a TeX command (e.g. r'\pi'), or a Type1 symbol name (e.g. 'phi'). If the *math* parameter is True (deprecated), an ASCII hyphen-minus is replaced by the Unicode minus sign.
Here is the function:
def get_unicode_index(symbol, math=False):  # Publicly exported.
    r"""
    Return the integer index (from the Unicode table) of *symbol*.

    Parameters
    ----------
    symbol : str
        A single (Unicode) character, a TeX command (e.g. r'\pi') or a Type1
        symbol name (e.g. 'phi').
    math : bool, default: False
        If True (deprecated), replace ASCII hyphen-minus by Unicode minus.
    """
    # Unicode Technical Report #25 prefers U+2212 MINUS SIGN over the
    # ASCII-derived U+002D hyphen-minus for math: it is unambiguous and is
    # usually rendered with a more desirable (longer) length.
    # Remove this branch when the 'math' parameter is deleted.
    if math and symbol == '-':
        return 0x2212
    try:
        # A single Unicode character: its code point is the answer.
        return ord(symbol)
    except TypeError:
        # Longer strings are treated as TeX command names below.
        pass
    try:
        # TeX command (e.g. \alpha) or Type1 name: strip any backslashes
        # and look the name up in the table.
        return tex2uni[symbol.strip("\\")]
    except KeyError as err:
        raise ValueError(
            "'{}' is not a valid Unicode character or TeX/Type1 symbol"
            .format(symbol)) from err
171,339 | import copy
from collections import namedtuple
import enum
import functools
import logging
import os
import re
import types
import unicodedata
import numpy as np
from pyparsing import (
Empty, Forward, Literal, NotAny, oneOf, OneOrMore, Optional,
ParseBaseException, ParseException, ParseExpression, ParseFatalException,
ParserElement, ParseResults, QuotedString, Regex, StringEnd, ZeroOrMore,
pyparsing_common)
import matplotlib as mpl
from . import _api, cbook
from ._mathtext_data import (
latex_to_bakoma, stix_glyph_fixes, stix_virtual_fonts, tex2uni)
from .font_manager import FontProperties, findfont, get_font
from .ft2font import FT2Image, KERNING_DEFAULT
class StixSansFonts(StixFonts):
    """
    A font handling class for the STIX fonts (that uses sans-serif
    characters by default).
    """
    # Flag read by the shared StixFonts machinery to select the sans-serif
    # code-point remapping (STIX "sans" is the same font files addressed
    # through different code points).
    _sans = True
class FontConstantsBase:
    """
    Layout constants controlling sub-/superscript placement and spacing.

    These are metrics that cannot be reliably retrieved from the font files
    themselves, so they are hard-coded here and tweaked per font family by
    the subclasses.  Unless stated otherwise, each value is a fraction of
    the font's x-height.
    """
    # Extra horizontal space appended after a sub-/superscript.
    script_space = 0.05
    # How far sub-/superscripts drop below the baseline.
    subdrop = 0.4
    # How far superscripts are raised above the baseline.
    sup1 = 0.7
    # How far subscripts drop below the baseline.
    sub1 = 0.3
    # Subscript drop used when a superscript is also present.
    sub2 = 0.5
    # Horizontal offset of sub-/superscripts from the nucleus edge, for
    # non-slanted nuclei.
    delta = 0.025
    # Additional superscript offset (as a fraction of the last character's
    # height above 2/3 of the x-height) relative to the subscript, for
    # slanted nuclei.
    delta_slanted = 0.2
    # Script offset used for integral signs.
    delta_integral = 0.1
class STIXFontConstants(FontConstantsBase):
    # Overrides tuned for the STIX fonts; see FontConstantsBase for the
    # meaning of each constant.
    script_space = 0.1
    sup1 = 0.8
    sub2 = 0.6
    delta = 0.05
    delta_slanted = 0.3
    delta_integral = 0.3
class STIXSansFontConstants(FontConstantsBase):
    # Overrides tuned for the sans-serif STIX variant; see FontConstantsBase
    # for the meaning of each constant.
    script_space = 0.05
    sup1 = 0.8
    delta_slanted = 0.6
    delta_integral = 0.3
# Map a font family name (as reported by the font file itself) to the
# FontConstants class tuned for it; lookups fall back to FontConstantsBase
# for unknown families (see _get_font_constant_set).
_font_constant_mapping = {
    'DejaVu Sans': DejaVuSansFontConstants,
    'DejaVu Sans Mono': DejaVuSansFontConstants,
    'DejaVu Serif': DejaVuSerifFontConstants,
    'cmb10': ComputerModernFontConstants,
    'cmex10': ComputerModernFontConstants,
    'cmmi10': ComputerModernFontConstants,
    'cmr10': ComputerModernFontConstants,
    'cmss10': ComputerModernFontConstants,
    'cmsy10': ComputerModernFontConstants,
    'cmtt10': ComputerModernFontConstants,
    'STIXGeneral': STIXFontConstants,
    'STIXNonUnicode': STIXFontConstants,
    'STIXSizeFiveSym': STIXFontConstants,
    'STIXSizeFourSym': STIXFontConstants,
    'STIXSizeThreeSym': STIXFontConstants,
    'STIXSizeTwoSym': STIXFontConstants,
    'STIXSizeOneSym': STIXFontConstants,
    # Map the fonts we used to ship, just for good measure
    'Bitstream Vera Sans': DejaVuSansFontConstants,
    'Bitstream Vera': DejaVuSansFontConstants,
}
def _get_font_constant_set(state):
    """Return the FontConstants class matching *state*'s current font."""
    family = state.fontset._get_font(state.font).family_name
    constants = _font_constant_mapping.get(family, FontConstantsBase)
    # "STIX sans" is not a separate set of font files, just different code
    # points within the regular STIX fonts, so the family name cannot
    # distinguish it; detect it from the fontset type instead.
    if (constants is STIXFontConstants
            and isinstance(state.fontset, StixSansFonts)):
        return STIXSansFontConstants
    return constants
171,340 | import copy
from collections import namedtuple
import enum
import functools
import logging
import os
import re
import types
import unicodedata
import numpy as np
from pyparsing import (
Empty, Forward, Literal, NotAny, oneOf, OneOrMore, Optional,
ParseBaseException, ParseException, ParseExpression, ParseFatalException,
ParserElement, ParseResults, QuotedString, Regex, StringEnd, ZeroOrMore,
pyparsing_common)
import matplotlib as mpl
from . import _api, cbook
from ._mathtext_data import (
latex_to_bakoma, stix_glyph_fixes, stix_virtual_fonts, tex2uni)
from .font_manager import FontProperties, findfont, get_font
from .ft2font import FT2Image, KERNING_DEFAULT
class Output:
    r"""
    Result of `ship`\ping a box: lists of positioned glyphs and rectangles.

    This class is not exposed to end users, but converted to a `VectorParse` or
    a `RasterParse` by `.MathTextParser.parse`.
    """

    def __init__(self, box):
        self.box = box
        self.glyphs = []  # (ox, oy, info)
        self.rects = []  # (x1, y1, x2, y2)

    def to_vector(self):
        # Round the overall box dimensions up to whole units.
        w, h, d = map(
            np.ceil, [self.box.width, self.box.height, self.box.depth])
        # Flip y: glyph/rect y's grow upward, parse-result y's grow downward.
        gs = [(info.font, info.fontsize, info.num, ox, h - oy + info.offset)
              for ox, oy, info in self.glyphs]
        rs = [(x1, h - y2, x2 - x1, y2 - y1)
              for x1, y1, x2, y2 in self.rects]
        return VectorParse(w, h + d, d, gs, rs)

    def to_raster(self):
        # Metrics y's and mathtext y's are oriented in opposite directions,
        # hence the switch between ymin and ymax.
        xmin = min([*[ox + info.metrics.xmin for ox, oy, info in self.glyphs],
                    *[x1 for x1, y1, x2, y2 in self.rects], 0]) - 1
        ymin = min([*[oy - info.metrics.ymax for ox, oy, info in self.glyphs],
                    *[y1 for x1, y1, x2, y2 in self.rects], 0]) - 1
        xmax = max([*[ox + info.metrics.xmax for ox, oy, info in self.glyphs],
                    *[x2 for x1, y1, x2, y2 in self.rects], 0]) + 1
        ymax = max([*[oy - info.metrics.ymin for ox, oy, info in self.glyphs],
                    *[y2 for x1, y1, x2, y2 in self.rects], 0]) + 1
        w = xmax - xmin
        h = ymax - ymin - self.box.depth
        d = ymax - ymin - self.box.height
        image = FT2Image(np.ceil(w), np.ceil(h + max(d, 0)))
        # Ideally, we could just use self.glyphs and self.rects here, shifting
        # their coordinates by (-xmin, -ymin), but this yields slightly
        # different results due to floating point slop; shipping twice is the
        # old approach and keeps baseline images backcompat.
        shifted = ship(self.box, (-xmin, -ymin))
        for ox, oy, info in shifted.glyphs:
            info.font.draw_glyph_to_bitmap(
                image, ox, oy - info.metrics.iceberg, info.glyph,
                antialiased=mpl.rcParams['text.antialiased'])
        for x1, y1, x2, y2 in shifted.rects:
            height = max(int(y2 - y1) - 1, 0)
            if height == 0:
                # Degenerate (hairline) rule: draw a one-pixel-high line
                # centered on the rule's vertical midpoint.
                center = (y2 + y1) / 2
                y = int(center - (height + 1) / 2)
            else:
                y = int(y1)
            image.draw_rect_filled(int(x1), y, np.ceil(x2), y + height)
        return RasterParse(0, 0, w, h + d, d, image)
class Box(Node):
    """A node occupying physical space: a width, a height and a depth."""

    def __init__(self, width, height, depth):
        super().__init__()
        self.width = width
        self.height = height
        self.depth = depth

    def shrink(self):
        # Step down one size level and scale all dimensions accordingly.
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            for dim in ('width', 'height', 'depth'):
                setattr(self, dim, getattr(self, dim) * SHRINK_FACTOR)

    def render(self, output, x1, y1, x2, y2):
        # A bare box draws nothing; subclasses override this.
        pass
class Char(Node):
    """
    A single character.

    Unlike TeX, the font information and metrics are stored with each `Char`
    to make it easier to lookup the font metrics when needed.  Note that TeX
    boxes have a width, height, and depth, unlike Type1 and TrueType which use
    a full bounding box and an advance in the x-direction.  The metrics must
    be converted to the TeX model, and the advance (if different from width)
    must be converted into a `Kern` node when the `Char` is added to its parent
    `Hlist`.
    """

    def __init__(self, c, state):
        super().__init__()
        self.c = c
        # Snapshot the rendering state the character was created under.
        self.fontset = state.fontset
        self.font = state.font
        self.font_class = state.font_class
        self.fontsize = state.fontsize
        self.dpi = state.dpi
        # The real width, height and depth will be set during the
        # pack phase, after we know the real fontsize
        self._update_metrics()

    def __repr__(self):
        return '`%s`' % self.c

    def _update_metrics(self):
        # Refresh the cached metrics; called from __init__ and again from
        # shrink() whenever the fontsize changes.
        metrics = self._metrics = self.fontset.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        if self.c == ' ':
            # A space has no inked extent; use its advance as the width.
            self.width = metrics.advance
        else:
            self.width = metrics.width
        # iceberg is the part above the baseline; the remainder is depth.
        self.height = metrics.iceberg
        self.depth = -(metrics.iceberg - metrics.height)

    def is_slanted(self):
        return self._metrics.slanted

    def get_kerning(self, next):
        """
        Return the amount of kerning between this and the given character.

        This method is called when characters are strung together into `Hlist`
        to create `Kern` nodes.
        """
        # Any difference between advance and width must also be emitted as
        # kerning, in addition to the font-defined pair kern.
        advance = self._metrics.advance - self.width
        kern = 0.
        if isinstance(next, Char):
            kern = self.fontset.get_kern(
                self.font, self.font_class, self.c, self.fontsize,
                next.font, next.font_class, next.c, next.fontsize,
                self.dpi)
        return advance + kern

    def render(self, output, x, y):
        # Record this glyph at absolute position (x, y) into *output*.
        self.fontset.render_glyph(
            output, x, y,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)

    def shrink(self):
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            self.fontsize *= SHRINK_FACTOR
            self.width *= SHRINK_FACTOR
            self.height *= SHRINK_FACTOR
            self.depth *= SHRINK_FACTOR
class List(Box):
    """A list of nodes (either horizontal or vertical)."""

    def __init__(self, elements):
        super().__init__(0., 0., 0.)
        self.shift_amount = 0.   # An arbitrary offset
        self.children = elements  # The child nodes of this list
        # The following parameters are set in the vpack and hpack functions
        self.glue_set = 0.   # The glue setting of this list
        self.glue_sign = 0   # 0: normal, -1: shrinking, 1: stretching
        self.glue_order = 0  # The order of infinity (0 - 3) for the glue

    def __repr__(self):
        return '%s<w=%.02f h=%.02f d=%.02f s=%.02f>[%s]' % (
            super().__repr__(),
            self.width, self.height,
            self.depth, self.shift_amount,
            ', '.join([repr(x) for x in self.children]))

    def _set_glue(self, x, sign, totals, error_type):
        # TeX's glue-setting step: decide how the leftover space *x* is
        # distributed over this list's glue (stretching or shrinking,
        # according to *sign*).
        self.glue_order = o = next(
            # Highest order of glue used by the members of this list.
            (i for i in range(len(totals))[::-1] if totals[i] != 0), 0)
        self.glue_sign = sign
        if totals[o] != 0.:
            self.glue_set = x / totals[o]
        else:
            # No glue at that order: disable glue setting entirely.
            self.glue_sign = 0
            self.glue_ratio = 0.
        if o == 0:
            # Only finite-order glue available: the box is over-/underfull.
            if len(self.children):
                _log.warning("%s %s: %r",
                             error_type, type(self).__name__, self)

    def shrink(self):
        for child in self.children:
            child.shrink()
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            self.shift_amount *= SHRINK_FACTOR
            self.glue_set *= SHRINK_FACTOR
class Hlist(List):
    """A horizontal list of boxes."""

    def __init__(self, elements, w=0., m='additional', do_kern=True):
        super().__init__(elements)
        if do_kern:
            self.kern()
        self.hpack(w=w, m=m)

    def kern(self):
        """
        Insert `Kern` nodes between `Char` nodes to set kerning.

        The `Char` nodes themselves determine the amount of kerning they need
        (in `~Char.get_kerning`), and this function just creates the correct
        linked list.
        """
        new_children = []
        num_children = len(self.children)
        if num_children:
            for i in range(num_children):
                elem = self.children[i]
                # Each element kerns against its successor (None at the end).
                if i < num_children - 1:
                    next = self.children[i + 1]
                else:
                    next = None
                new_children.append(elem)
                kerning_distance = elem.get_kerning(next)
                if kerning_distance != 0.:
                    kern = Kern(kerning_distance)
                    new_children.append(kern)
            self.children = new_children

    # This is a failed experiment to fake cross-font kerning.
    # def get_kerning(self, next):
    #     if len(self.children) >= 2 and isinstance(self.children[-2], Char):
    #         if isinstance(next, Char):
    #             print "CASE A"
    #             return self.children[-2].get_kerning(next)
    #         elif (isinstance(next, Hlist) and len(next.children)
    #               and isinstance(next.children[0], Char)):
    #             print "CASE B"
    #             result = self.children[-2].get_kerning(next.children[0])
    #             print result
    #             return result
    #     return 0.0

    def hpack(self, w=0., m='additional'):
        r"""
        Compute the dimensions of the resulting boxes, and adjust the glue if
        one of those dimensions is pre-specified.  The computed sizes normally
        enclose all of the material inside the new box; but some items may
        stick out if negative glue is used, if the box is overfull, or if a
        ``\vbox`` includes other boxes that have been shifted left.

        Parameters
        ----------
        w : float, default: 0
            A width.
        m : {'exactly', 'additional'}, default: 'additional'
            Whether to produce a box whose width is 'exactly' *w*; or a box
            with the natural width of the contents, plus *w* ('additional').

        Notes
        -----
        The defaults produce a box with the natural width of the contents.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        # self.shift_amount = 0.
        h = 0.
        d = 0.
        x = 0.
        # Glue totals indexed by order of infinity (0-3).
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        for p in self.children:
            if isinstance(p, Char):
                x += p.width
                h = max(h, p.height)
                d = max(d, p.depth)
            elif isinstance(p, Box):
                x += p.width
                if not np.isinf(p.height) and not np.isinf(p.depth):
                    s = getattr(p, 'shift_amount', 0.)
                    h = max(h, p.height - s)
                    d = max(d, p.depth + s)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += p.width
        self.height = h
        self.depth = d
        if m == 'additional':
            w += x
        self.width = w
        # x is now the excess (positive) or deficit (negative) of space.
        x = w - x
        if x == 0.:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overful")
        else:
            self._set_glue(x, -1, total_shrink, "Underful")
class Glue(Node):
    """
    Stretchable/shrinkable space between boxes.

    Most of the information in this object is stored in the underlying
    ``_GlueSpec``, which can be shared between multiple glue objects.  (This
    mirrors TeX's design; the memory optimization probably no longer
    matters, but it is easier to stick to what TeX does.)
    """

    def __init__(self, glue_type):
        super().__init__()
        # Accept either a named spec or an explicit _GlueSpec instance.
        if isinstance(glue_type, str):
            self.glue_spec = _GlueSpec._named[glue_type]
        elif isinstance(glue_type, _GlueSpec):
            self.glue_spec = glue_type
        else:
            raise ValueError("glue_type must be a glue spec name or instance")

    def shrink(self):
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            spec = self.glue_spec
            self.glue_spec = spec._replace(width=spec.width * SHRINK_FACTOR)
class Kern(Node):
    """
    A `Kern` node has a width field to specify a (normally
    negative) amount of spacing.  This spacing correction appears in
    horizontal lists between letters like A and V when the font
    designer said that it looks better to move them closer together or
    further apart.  A kern node can also appear in a vertical list,
    when its *width* denotes additional spacing in the vertical
    direction.
    """

    # Kerns occupy no vertical extent of their own.
    height = 0
    depth = 0

    def __init__(self, width):
        super().__init__()
        self.width = width

    def __repr__(self):
        return "k%.02f" % self.width

    def shrink(self):
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            self.width *= SHRINK_FACTOR
The code above provides the dependencies needed to implement the `ship` function. Write a Python function `def ship(box, xy=(0, 0))` that solves the following problem:
Ship out *box* at offset *xy*, converting it to an `Output`. Since boxes can be nested arbitrarily deep, the main work of `ship` is done by two mutually recursive routines, `hlist_out` and `vlist_out`, which traverse the `Hlist` and `Vlist` nodes inside horizontal and vertical boxes. The global variables that TeX uses to store state while processing have become local variables here.
Here is the function:
def ship(box, xy=(0, 0)):
    """
    Ship out *box* at offset *xy*, converting it to an `Output`.

    Since boxes can be inside of boxes inside of boxes, the main work of `ship`
    is done by two mutually recursive routines, `hlist_out` and `vlist_out`,
    which traverse the `Hlist` nodes and `Vlist` nodes inside of horizontal
    and vertical boxes.  The global variables used in TeX to store state as it
    processes have become local variables here.
    """
    ox, oy = xy
    # Cursor position (cur_h, cur_v) plus the fixed offsets (off_h, off_v)
    # applied when actually rendering.
    cur_v = 0.
    cur_h = 0.
    off_h = ox
    off_v = oy + box.height
    output = Output(box)

    def clamp(value):
        # Keep glue arithmetic within TeX-like finite bounds.
        return -1e9 if value < -1e9 else +1e9 if value > +1e9 else value

    def hlist_out(box):
        nonlocal cur_v, cur_h, off_h, off_v
        cur_g = 0
        cur_glue = 0.
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        base_line = cur_v
        left_edge = cur_h
        for p in box.children:
            if isinstance(p, Char):
                p.render(output, cur_h + off_h, cur_v + off_v)
                cur_h += p.width
            elif isinstance(p, Kern):
                cur_h += p.width
            elif isinstance(p, List):
                # node623
                # NOTE(review): the nodeNNN tags presumably refer to section
                # numbers in Knuth's "TeX: The Program" -- confirm.
                if len(p.children) == 0:
                    cur_h += p.width
                else:
                    edge = cur_h
                    cur_v = base_line + p.shift_amount
                    if isinstance(p, Hlist):
                        hlist_out(p)
                    else:
                        # p.vpack(box.height + box.depth, 'exactly')
                        vlist_out(p)
                    cur_h = edge + p.width
                    cur_v = base_line
            elif isinstance(p, Box):
                # node624
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                # Infinite dimensions mean "fill the enclosing box".
                if np.isinf(rule_height):
                    rule_height = box.height
                if np.isinf(rule_depth):
                    rule_depth = box.depth
                if rule_height > 0 and rule_width > 0:
                    cur_v = base_line + rule_depth
                    p.render(output,
                             cur_h + off_h, cur_v + off_v,
                             rule_width, rule_height)
                    cur_v = base_line
                cur_h += rule_width
            elif isinstance(p, Glue):
                # node625
                glue_spec = p.glue_spec
                rule_width = glue_spec.width - cur_g
                if glue_sign != 0:  # normal
                    if glue_sign == 1:  # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(box.glue_set * cur_glue))
                    elif glue_spec.shrink_order == glue_order:
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(box.glue_set * cur_glue))
                rule_width += cur_g
                cur_h += rule_width

    def vlist_out(box):
        nonlocal cur_v, cur_h, off_h, off_v
        cur_g = 0
        cur_glue = 0.
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        left_edge = cur_h
        cur_v -= box.height
        top_edge = cur_v
        for p in box.children:
            if isinstance(p, Kern):
                cur_v += p.width
            elif isinstance(p, List):
                if len(p.children) == 0:
                    cur_v += p.height + p.depth
                else:
                    cur_v += p.height
                    cur_h = left_edge + p.shift_amount
                    save_v = cur_v
                    p.width = box.width
                    if isinstance(p, Hlist):
                        hlist_out(p)
                    else:
                        vlist_out(p)
                    cur_v = save_v + p.depth
                    cur_h = left_edge
            elif isinstance(p, Box):
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                if np.isinf(rule_width):
                    rule_width = box.width
                rule_height += rule_depth
                if rule_height > 0 and rule_depth > 0:
                    cur_v += rule_height
                    p.render(output,
                             cur_h + off_h, cur_v + off_v,
                             rule_width, rule_height)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                rule_height = glue_spec.width - cur_g
                if glue_sign != 0:  # normal
                    if glue_sign == 1:  # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(box.glue_set * cur_glue))
                    elif glue_spec.shrink_order == glue_order:  # shrinking
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(box.glue_set * cur_glue))
                rule_height += cur_g
                cur_v += rule_height
            elif isinstance(p, Char):
                raise RuntimeError(
                    "Internal mathtext error: Char node found in vlist")

    hlist_out(box)
    return output
171,341 | import copy
from collections import namedtuple
import enum
import functools
import logging
import os
import re
import types
import unicodedata
import numpy as np
from pyparsing import (
Empty, Forward, Literal, NotAny, oneOf, OneOrMore, Optional,
ParseBaseException, ParseException, ParseExpression, ParseFatalException,
ParserElement, ParseResults, QuotedString, Regex, StringEnd, ZeroOrMore,
pyparsing_common)
import matplotlib as mpl
from . import _api, cbook
from ._mathtext_data import (
latex_to_bakoma, stix_glyph_fixes, stix_virtual_fonts, tex2uni)
from .font_manager import FontProperties, findfont, get_font
from .ft2font import FT2Image, KERNING_DEFAULT
def Error(msg):
    """Return a parse element that always fails, reporting *msg*."""
    def _raise(s, loc, toks):
        raise ParseFatalException(s, loc, msg)
    # Empty() matches (zero-width) at any position, so the attached parse
    # action fires -- and raises -- as soon as this element is tried.
    return Empty().setParseAction(_raise)
# NOTE(review): this appears to be a vendored copy of pyparsing 2.x
# (Py2-era names such as basestring, _ustr, _generatorType come from that
# module) -- prefer syncing with upstream over editing locally.
class ParseExpression(ParserElement):
    """
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    """
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # Normalize *exprs* into a list of ParserElements, wrapping bare
        # strings in the configured literal class.
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)
        if isinstance( exprs, basestring ):
            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
        elif isinstance( exprs, Iterable ):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(ParserElement._literalStringClass, exprs)
            self.exprs = list(exprs)
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False

    def __getitem__( self, i ):
        return self.exprs[i]

    def append( self, other ):
        # Invalidate the cached string representation when mutating.
        self.exprs.append( other )
        self.strRepr = None
        return self

    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self

    def ignore( self, other ):
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self

    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr

    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        self.errmsg = "Expected " + _ustr(self)
        return self

    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret

    def validate( self, validateTrace=[] ):
        # NOTE(review): mutable default argument; harmless here only because
        # validateTrace is copied (validateTrace[:] below), never mutated.
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )

    def copy(self):
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret
The code above provides the dependencies needed to implement the `cmd` function. Write a Python function `def cmd(expr, args)` that solves the following problem:
Helper to define TeX commands. ``cmd("\cmd", args)`` is equivalent to ``"\cmd" - (args | Error("Expected \cmd{arg}{...}"))``, where the names in the error message are taken from the element names in *args*. If *expr* already includes arguments (e.g. "\cmd{arg}{...}"), they are stripped when constructing the parse element, but kept (and *expr* is used as is) in the error message.
Here is the function:
def cmd(expr, args):
    r"""
    Helper to define TeX commands.

    ``cmd("\cmd", args)`` is equivalent to
    ``"\cmd" - (args | Error("Expected \cmd{arg}{...}"))`` where the names in
    the error message are taken from element names in *args*.  If *expr*
    already includes arguments (e.g. "\cmd{arg}{...}"), then they are stripped
    when constructing the parse element, but kept (and *expr* is used as is)
    in the error message.
    """
    def names(elt):
        # Recursively collect the results-names of all sub-expressions.
        if isinstance(elt, ParseExpression):
            for sub in elt.exprs:
                yield from names(sub)
        elif elt.resultsName:
            yield elt.resultsName

    csname = expr.split("{", 1)[0]
    if expr == csname:
        # Bare command: synthesize the "{arg}{...}" part for the message.
        err = csname + "".join("{%s}" % name for name in names(args))
    else:
        err = expr
    return csname - (args | Error(f"Expected {err}"))
171,342 | import dataclasses
from . import _api
from .ft2font import KERNING_DEFAULT, LOAD_NO_HINTING
def warn_on_missing_glyph(codepoint):
    """Warn that *codepoint* has no glyph in the current font."""
    _api.warn_external(
        "Glyph {} ({}) missing from current font.".format(
            codepoint,
            chr(codepoint).encode("ascii", "namereplace").decode("ascii")))
    # If the code point falls in a script block we know we cannot shape,
    # add a more specific warning naming that script.
    ranges = (
        (0x0590, 0x05ff, "Hebrew"),
        (0x0600, 0x06ff, "Arabic"),
        (0x0900, 0x097f, "Devanagari"),
        (0x0980, 0x09ff, "Bengali"),
        (0x0a00, 0x0a7f, "Gurmukhi"),
        (0x0a80, 0x0aff, "Gujarati"),
        (0x0b00, 0x0b7f, "Oriya"),
        (0x0b80, 0x0bff, "Tamil"),
        (0x0c00, 0x0c7f, "Telugu"),
        (0x0c80, 0x0cff, "Kannada"),
        (0x0d00, 0x0d7f, "Malayalam"),
        (0x0d80, 0x0dff, "Sinhala"),
    )
    block = next(
        (name for lo, hi, name in ranges if lo <= codepoint <= hi), None)
    if block:
        _api.warn_external(
            f"Matplotlib currently does not support {block} natively.")
171,343 | import dataclasses
from . import _api
from .ft2font import KERNING_DEFAULT, LOAD_NO_HINTING
@dataclasses.dataclass
class LayoutItem:
    """One positioned glyph produced by `layout`: the font actually used
    (after fallback), the character, its glyph index, its x-position, and
    the kern applied just before it."""
    ft_object: object
    char: object
    glyph_idx: object
    x: object
    prev_kern: object
The code above provides the dependencies needed to implement the `layout` function. Write a Python function `def layout(string, font, *, kern_mode=KERNING_DEFAULT)` that solves the following problem:
Render *string* with *font*. For each character in *string*, yield a layout item carrying the glyph index and x-position; when such an item is yielded, the font's glyph is set to the corresponding character. Parameters: *string* (str) -- the string to be rendered; *font* (FT2Font) -- the font; *kern_mode* (int) -- a FreeType kerning mode. Yields: *glyph_index* (int) and *x_position* (float) for each character.
Here is the function:
def layout(string, font, *, kern_mode=KERNING_DEFAULT):
    """
    Lay out *string* with *font*, yielding one `LayoutItem` per character.

    Each item records the (possibly fallback) font actually used, the
    character, its glyph index, its x-position, and the kern inserted before
    it.  When an item is yielded, the font's current glyph has been loaded
    for the corresponding character.

    Parameters
    ----------
    string : str
        The string to be rendered.
    font : FT2Font
        The font.
    kern_mode : int
        A FreeType kerning mode.

    Yields
    ------
    LayoutItem
    """
    # _get_fontmap resolves per-character font fallback up front.
    char_to_font = font._get_fontmap(string)
    base_font = font
    x = 0
    prev_glyph_idx = None
    for char in string:
        sub_font = char_to_font.get(char, base_font)
        glyph_idx = sub_font.get_char_index(ord(char))
        if prev_glyph_idx is None:
            kern = 0.
        else:
            # Kerning between the previous glyph and this one, in pixels
            # (FreeType reports 26.6 fixed point, hence / 64).
            kern = base_font.get_kerning(
                prev_glyph_idx, glyph_idx, kern_mode) / 64
        x += kern
        glyph = sub_font.load_glyph(glyph_idx, flags=LOAD_NO_HINTING)
        yield LayoutItem(sub_font, char, glyph_idx, x, kern)
        # Advance by the glyph's linear advance (16.16 fixed point).
        x += glyph.linearHoriAdvance / 65536
        prev_glyph_idx = glyph_idx
171,344 | import os
import sys
import time
from collections import OrderedDict
from datetime import timedelta
from ._internal_utils import to_native_string
from .adapters import HTTPAdapter
from .auth import _basic_auth_str
from .compat import Mapping, cookielib, urljoin, urlparse
from .cookies import (
RequestsCookieJar,
cookiejar_from_dict,
extract_cookies_to_jar,
merge_cookies,
)
from .exceptions import (
ChunkedEncodingError,
ContentDecodingError,
InvalidSchema,
TooManyRedirects,
)
from .hooks import default_hooks, dispatch_hook
from .models import ( # noqa: F401
DEFAULT_REDIRECT_LIMIT,
REDIRECT_STATI,
PreparedRequest,
Request,
)
from .status_codes import codes
from .structures import CaseInsensitiveDict
from .utils import ( # noqa: F401
DEFAULT_PORTS,
default_headers,
get_auth_from_url,
get_environ_proxies,
get_netrc_auth,
requote_uri,
resolve_proxies,
rewind_body,
should_bypass_proxies,
to_key_val_list,
)
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determine the effective setting for a request.

    The explicit request-level setting wins over the session-level one.
    When both are mappings, they are merged into a *dict_class* (request
    keys overriding session keys) and keys whose merged value is ``None``
    are removed.
    """
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting
    # Non-mapping settings (e.g. ``verify``) cannot be merged; the request
    # value simply wins.
    if not (isinstance(session_setting, Mapping)
            and isinstance(request_setting, Mapping)):
        return request_setting
    merged_setting = dict_class(to_key_val_list(session_setting))
    merged_setting.update(to_key_val_list(request_setting))
    # Drop keys explicitly set to None.  Collect them first to avoid
    # mutating the dict while iterating it.
    for key in [k for (k, v) in merged_setting.items() if v is None]:
        del merged_setting[key]
    return merged_setting
# NOTE(review): this class shadows the ``from collections import
# OrderedDict`` at the top of the file -- presumably a deliberate
# replacement; confirm before relying on either.
class OrderedDict(dict):
    # Insertion-ordered dict with an optional default factory; key order is
    # tracked separately in self._keys.
    def __init__(self, data=None, **kwargs):
        # keys() doubles as a helper that derives the initial key list from
        # *data* (see below).
        self._keys = self.keys(data, kwargs.get("keys"))
        self._default_factory = kwargs.get("default_factory")
        if data is None:
            dict.__init__(self)
        else:
            dict.__init__(self, data)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._keys.remove(key)
    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)
    def __iter__(self):
        return (key for key in self.keys())
    def __missing__(self, key):
        # Without a default factory, a missing key is an error; otherwise
        # return a fresh default (note: the default is NOT stored).
        if not self._default_factory and key not in self._keys:
            raise KeyError()
        return self._default_factory()
    def __setitem__(self, key, item):
        dict.__setitem__(self, key, item)
        if key not in self._keys:
            self._keys.append(key)
    def clear(self):
        dict.clear(self)
        self._keys.clear()
    def copy(self):
        # NOTE(review): dict.copy returns a plain dict, and plain dicts do
        # not accept attribute assignment -- this looks broken; confirm.
        d = dict.copy(self)
        d._keys = self._keys
        return d
    def items(self):
        # returns iterator under python 3 and list under python 2
        return zip(self.keys(), self.values())
    def keys(self, data=None, keys=None):
        # Dual-purpose: with *data*, compute the initial ordered key list
        # (used by __init__); without it, return the tracked key order.
        if data:
            if keys:
                assert isinstance(keys, list)
                assert len(data) == len(keys)
                return keys
            else:
                assert (
                    isinstance(data, dict)
                    or isinstance(data, OrderedDict)
                    or isinstance(data, list)
                )
                if isinstance(data, dict) or isinstance(data, OrderedDict):
                    return data.keys()
                elif isinstance(data, list):
                    return [key for (key, value) in data]
        elif "_keys" in self.__dict__:
            return self._keys
        else:
            return []
    def popitem(self):
        # LIFO: removes and returns the most recently added pair.
        if not self._keys:
            raise KeyError()
        key = self._keys.pop()
        value = self[key]
        del self[key]
        return (key, value)
    def setdefault(self, key, failobj=None):
        dict.setdefault(self, key, failobj)
        if key not in self._keys:
            self._keys.append(key)
    def update(self, data):
        dict.update(self, data)
        for key in self.keys(data):
            if key not in self._keys:
                self._keys.append(key)
    def values(self):
        # returns iterator under python 3
        return map(self.get, self._keys)
The provided code snippet includes necessary dependencies for implementing the `merge_hooks` function. Write a Python function `def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict)` to solve the following problem:
Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely.
Here is the function:
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Merge request-level and session-level hook registries.

    A hook dict of the form ``{'response': []}`` carries no hooks and is
    treated the same as ``None``: the other side wins outright.  Only when
    both sides actually contain hooks are they merged via
    ``merge_setting``.
    """
    def _is_empty(hooks):
        # No dict at all, or an explicit empty 'response' hook list.
        return hooks is None or hooks.get("response") == []

    if _is_empty(session_hooks):
        return request_hooks
    if _is_empty(request_hooks):
        return session_hooks
    return merge_setting(request_hooks, session_hooks, dict_class)
import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from urllib3.util import make_headers, parse_url
from . import certs
from .__version__ import __version__
from ._internal_utils import HEADER_VALIDATORS, to_native_string
from .compat import (
Mapping,
basestring,
bytes,
getproxies,
getproxies_environment,
integer_types,
)
from .compat import parse_http_list as _parse_list_header
from .compat import (
proxy_bypass,
proxy_bypass_environment,
quote,
str,
unquote,
urlparse,
urlunparse,
)
from .cookies import cookiejar_from_dict
from .exceptions import (
FileModeWarning,
InvalidHeader,
InvalidURL,
UnrewindableBodyError,
)
from .structures import CaseInsensitiveDict
# Candidate netrc filenames: POSIX-style ".netrc" and Windows-style "_netrc".
NETRC_FILES = (".netrc", "_netrc")
# Py2/py3 compat alias from the vendored compat layer; a no-op on Python 3.
str = str
class NetrcParseError(Exception):
    # Signature-only typing stub mirroring stdlib ``netrc.NetrcParseError``.
    # NOTE(review): ``Optional`` is not imported in this extract — these
    # annotations would raise NameError if evaluated; presumably this block
    # was lifted from a .pyi stub file.  Confirm provenance.
    filename: Optional[str]
    lineno: Optional[int]
    msg: str
class netrc:
    # Signature-only typing stub mirroring stdlib ``netrc.netrc``.
    # NOTE(review): ``Dict``/``List``/``Optional``/``_NetrcTuple`` are not
    # defined in this extract (they come from a .pyi stub context); these
    # annotations would raise NameError if evaluated — confirm provenance.
    hosts: Dict[str, _NetrcTuple]
    macros: Dict[str, List[str]]
    def __init__(self, file: str = ...) -> None: ...
    def authenticators(self, host: str) -> Optional[_NetrcTuple]: ...
The provided code snippet includes necessary dependencies for implementing the `get_netrc_auth` function. Write a Python function `def get_netrc_auth(url, raise_errors=False)` to solve the following problem:
Returns the Requests tuple auth for a given url from netrc.
Here is the function:
def get_netrc_auth(url, raise_errors=False):
    """Look up Requests-style ``(login, password)`` auth for *url* in netrc.

    Returns ``None`` when no netrc file exists, no machine entry matches,
    or ``$HOME`` cannot be resolved.  Parse/permission errors are swallowed
    unless *raise_errors* is true.
    """
    env_netrc = os.environ.get("NETRC")
    if env_netrc is not None:
        candidates = (env_netrc,)
    else:
        candidates = (f"~/{f}" for f in NETRC_FILES)

    try:
        from netrc import NetrcParseError, netrc

        netrc_path = None
        for candidate in candidates:
            try:
                location = os.path.expanduser(candidate)
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return
            if os.path.exists(location):
                netrc_path = location
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        separator = b":"
        if isinstance(url, str):
            separator = separator.decode("ascii")
        host = ri.netloc.split(separator)[0]

        try:
            entry = netrc(netrc_path).authenticators(host)
            if entry:
                # Return with login / password; fall back to the account
                # field when the login name is empty.
                login_index = 0 if entry[0] else 1
                return (entry[login_index], entry[2])
        except (NetrcParseError, OSError):
            # If there was a parsing error or a permissions issue reading the
            # file, we'll just skip netrc auth unless explicitly asked to
            # raise errors.
            if raise_errors:
                raise

    # App Engine hackiness.
    except (ImportError, AttributeError):
        pass
import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from urllib3.util import make_headers, parse_url
from . import certs
from .__version__ import __version__
from ._internal_utils import HEADER_VALIDATORS, to_native_string
from .compat import (
Mapping,
basestring,
bytes,
getproxies,
getproxies_environment,
integer_types,
)
from .compat import parse_http_list as _parse_list_header
from .compat import (
proxy_bypass,
proxy_bypass_environment,
quote,
str,
unquote,
urlparse,
urlunparse,
)
from .cookies import cookiejar_from_dict
from .exceptions import (
FileModeWarning,
InvalidHeader,
InvalidURL,
UnrewindableBodyError,
)
from .structures import CaseInsensitiveDict
class OrderedDict(dict):
    """Legacy order-preserving dict backed by a parallel ``_keys`` list.

    NOTE(review): this is a vendored/legacy implementation, not
    ``collections.OrderedDict``.  ``keys()`` doubles as an internal helper
    taking extra arguments, which departs from the normal ``dict.keys()``
    contract — confirm this is intentional before reusing.
    """

    def __init__(self, data=None, **kwargs):
        # keys() acts here as a constructor helper: it derives the initial
        # ordered key list from *data* or from an explicit ``keys=[...]``.
        self._keys = self.keys(data, kwargs.get("keys"))
        # Optional factory used by __missing__ for absent keys.
        self._default_factory = kwargs.get("default_factory")
        if data is None:
            dict.__init__(self)
        else:
            dict.__init__(self, data)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._keys.remove(key)  # keep the order list in sync

    def __getitem__(self, key):
        # EAFP: fall back to __missing__ (default factory) on a miss.
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)

    def __iter__(self):
        # Iterate in recorded insertion order rather than dict's own order.
        return (key for key in self.keys())

    def __missing__(self, key):
        # NOTE(review): if ``_default_factory`` is falsy but *key* IS in
        # ``_keys``, this still calls the factory and would raise TypeError
        # — verify intended behaviour.
        if not self._default_factory and key not in self._keys:
            raise KeyError()
        return self._default_factory()

    def __setitem__(self, key, item):
        dict.__setitem__(self, key, item)
        if key not in self._keys:  # record first insertion only
            self._keys.append(key)

    def clear(self):
        dict.clear(self)
        self._keys.clear()

    def copy(self):
        # NOTE(review): ``dict.copy`` returns a plain dict (which cannot
        # take attribute assignment), and ``_keys`` is shared rather than
        # copied — this method looks broken; confirm before relying on it.
        d = dict.copy(self)
        d._keys = self._keys
        return d

    def items(self):
        # returns iterator under python 3 and list under python 2
        return zip(self.keys(), self.values())

    def keys(self, data=None, keys=None):
        # Dual-purpose method: with arguments it derives an initial key
        # list for __init__/update; with no arguments it returns the live
        # insertion-order list (or [] before _keys exists).
        if data:
            if keys:
                assert isinstance(keys, list)
                assert len(data) == len(keys)
                return keys
            else:
                assert (
                    isinstance(data, dict)
                    or isinstance(data, OrderedDict)
                    or isinstance(data, list)
                )
                if isinstance(data, dict) or isinstance(data, OrderedDict):
                    return data.keys()
                elif isinstance(data, list):
                    return [key for (key, value) in data]
        elif "_keys" in self.__dict__:
            return self._keys
        else:
            return []

    def popitem(self):
        # LIFO: remove and return the most recently inserted pair.
        # NOTE(review): ``self._keys.pop()`` removes the key before
        # ``del self[key]`` calls __delitem__, whose ``_keys.remove(key)``
        # would then raise ValueError — verify.
        if not self._keys:
            raise KeyError()
        key = self._keys.pop()
        value = self[key]
        del self[key]
        return (key, value)

    def setdefault(self, key, failobj=None):
        # NOTE(review): unlike dict.setdefault this returns None, not the
        # stored value — confirm callers do not use the return value.
        dict.setdefault(self, key, failobj)
        if key not in self._keys:
            self._keys.append(key)

    def update(self, data):
        dict.update(self, data)
        for key in self.keys(data):
            if key not in self._keys:
                self._keys.append(key)

    def values(self):
        # returns iterator under python 3
        return map(self.get, self._keys)
# Py2/py3 compat aliases re-exported by the vendored compat layer;
# both are no-ops on Python 3.
str = str
bytes = bytes
The provided code snippet includes necessary dependencies for implementing the `from_key_val_list` function. Write a Python function `def from_key_val_list(value)` to solve the following problem:
Take an object and test whether it can be represented as a dictionary. If it cannot be represented as such, a ValueError is raised; otherwise an OrderedDict is returned, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict
Here is the function:
def from_key_val_list(value):
    """Coerce *value* into an :class:`OrderedDict` of key/value pairs.

    ``None`` passes through unchanged.  Scalar types that cannot carry
    key/value pairs (str, bytes, bool, int) raise ``ValueError``; anything
    else is handed to the ``OrderedDict`` constructor.

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError("cannot encode objects that are not 2-tuples")

    return OrderedDict(value)
import hashlib
import os
import re
import threading
import time
import warnings
from base64 import b64encode
from ._internal_utils import to_native_string
from .compat import basestring, str, urlparse
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
import warnings

# Hide noisy DependencyWarning chatter, but make sure FileModeWarning is
# surfaced with default behaviour (appended so user filters keep priority).
# NOTE(review): DependencyWarning / FileModeWarning are not defined in this
# extract — in the original module they come from urllib3.exceptions and the
# local exceptions module respectively; confirm before running standalone.
warnings.simplefilter("ignore", DependencyWarning)
warnings.simplefilter("default", FileModeWarning, append=True)
# Signature-only typing stub for ``base64.b64encode``; ``_encodable`` and
# ``Optional`` are stub-file names not defined in this extract.
def b64encode(s: _encodable, altchars: Optional[bytes] = ...) -> bytes: ...
def to_native_string(string, encoding="ascii"):
    """Return *string* as a native ``str``.

    Native strings pass through untouched; byte strings are decoded using
    *encoding* (ASCII unless told otherwise).
    """
    if isinstance(string, builtin_str):
        return string
    return string.decode(encoding)
# Compat aliases: the native text type, and the tuple used for
# py2-style "any string" isinstance checks.
str = str
basestring = (str, bytes)
The provided code snippet includes necessary dependencies for implementing the `_basic_auth_str` function. Write a Python function `def _basic_auth_str(username, password)` to solve the following problem:
Returns a Basic Auth string.
Here is the function:
def _basic_auth_str(username, password):
    """Build an HTTP Basic Auth header value from a username/password pair."""
    # "I want us to put a big-ol' comment on top of it that
    # says that this behaviour is dumb but we need to preserve
    # it because people are relying on it."
    # - Lukasa
    #
    # Non-string credentials (e.g. ints) are coerced with a
    # DeprecationWarning instead of rejected, purely for backwards
    # compatibility.  This will be removed in 3.0.0.
    if not isinstance(username, basestring):
        warnings.warn(
            "Non-string usernames will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(username),
            category=DeprecationWarning,
        )
        username = str(username)

    if not isinstance(password, basestring):
        warnings.warn(
            "Non-string passwords will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(type(password)),
            category=DeprecationWarning,
        )
        password = str(password)
    # -- End Removal --

    # Text credentials are encoded as latin1 before base64-ing.
    if isinstance(username, str):
        username = username.encode("latin1")
    if isinstance(password, str):
        password = password.encode("latin1")

    token = b64encode(b":".join((username, password))).strip()
    return "Basic " + to_native_string(token)
import json
# Version metadata embedded by versioneer at build time; do not edit by hand.
version_json = '''
{
"date": "2022-06-23T17:03:26-0700",
"dirty": false,
"error": null,
"full-revisionid": "d8c6624e9547587d6c57d27c97fb7fea717455e7",
"version": "1.8.2.2"
}
'''


def get_versions():
    """Decode the embedded JSON version record and return it as a dict."""
    return json.loads(version_json)
from __future__ import division
from itertools import tee
from operator import itemgetter
from collections import defaultdict
from math import log
def score(count_bigram, count1, count2, n_words):
def pairwise(iterable):
def process_tokens(words, normalize_plurals=True):
def unigrams_and_bigrams(words, stopwords, normalize_plurals=True, collocation_threshold=30):
    """Count unigrams plus bigrams that score as collocations.

    Bigrams whose collocation score exceeds *collocation_threshold* are
    added as phrases, and their member words' counts are discounted so the
    same occurrences are not counted twice.

    NOTE(review): relies on helpers ``pairwise``, ``process_tokens`` and
    ``score`` defined elsewhere in this module.
    """
    # We must create the bigrams before removing the stopword tokens from
    # the words, or else we get bigrams like "thank much" from
    # "thank you very much".  We don't allow any of the words in the bigram
    # to be stopwords.
    candidate_bigrams = [
        pair for pair in pairwise(words)
        if not any(token.lower() in stopwords for token in pair)
    ]
    kept_words = [word for word in words if word.lower() not in stopwords]
    total_words = len(kept_words)

    counts_unigrams, standard_form = process_tokens(
        kept_words, normalize_plurals=normalize_plurals)
    counts_bigrams, _standard_form_bigrams = process_tokens(
        [" ".join(pair) for pair in candidate_bigrams],
        normalize_plurals=normalize_plurals)

    # Score against an unmodified snapshot so discounts applied for one
    # collocation do not influence the decision for the next.
    orig_counts = counts_unigrams.copy()

    # Include bigrams that are also collocations.
    for bigram_string, bigram_count in counts_bigrams.items():
        parts = bigram_string.split(" ")
        word1 = standard_form[parts[0].lower()]
        word2 = standard_form[parts[1].lower()]
        collocation_score = score(
            bigram_count, orig_counts[word1], orig_counts[word2], total_words)
        if collocation_score > collocation_threshold:
            # The bigram is a collocation: discount its member words.  A word
            # may take part in multiple collocations at once (which can drive
            # its running count negative); such words are pruned below.
            counts_unigrams[word1] -= bigram_count
            counts_unigrams[word2] -= bigram_count
            counts_unigrams[bigram_string] = bigram_count

    for word, count in list(counts_unigrams.items()):
        if count <= 0:
            del counts_unigrams[word]
    return counts_unigrams
from __future__ import absolute_import
import sys
import textwrap
import io
import re
import argparse
import wordcloud as wc
import numpy as np
from PIL import Image
from . import __version__
def make_parser():
    """Build the argparse parser for the wordcloud command-line interface.

    NOTE(review): the bare ``FileType`` and ``RegExpAction`` names are not
    defined in this extract — upstream the CLI module provides custom
    versions (``argparse.FileType`` is used directly only for binary image
    inputs).  Confirm before running standalone.
    """
    description = 'A simple command line interface for wordcloud module.'
    parser = argparse.ArgumentParser(description=description)
    # --- text input and parsing options ---
    parser.add_argument(
        '--text', metavar='file', type=FileType(), default='-',
        help='specify file of words to build the word cloud (default: stdin)')
    parser.add_argument(
        '--regexp', metavar='regexp', default=None, action=RegExpAction,
        help='override the regular expression defining what constitutes a word')
    parser.add_argument(
        '--stopwords', metavar='file', type=FileType(),
        help='specify file of stopwords (containing one word per line)'
        ' to remove from the given text after parsing')
    # --- output destination ---
    parser.add_argument(
        '--imagefile', metavar='file', type=FileType('wb'),
        default='-',
        help='file the completed PNG image should be written to'
        ' (default: stdout)')
    # --- appearance: fonts, masks and colors ---
    parser.add_argument(
        '--fontfile', metavar='path', dest='font_path',
        help='path to font file you wish to use (default: DroidSansMono)')
    parser.add_argument(
        '--mask', metavar='file', type=argparse.FileType('rb'),
        help='mask to use for the image form')
    parser.add_argument(
        '--colormask', metavar='file', type=argparse.FileType('rb'),
        help='color mask to use for image coloring')
    parser.add_argument(
        '--contour_width', metavar='width', default=0, type=float,
        dest='contour_width',
        help='if greater than 0, draw mask contour (default: 0)')
    parser.add_argument(
        '--contour_color', metavar='color', default='black', type=str,
        dest='contour_color',
        help='use given color as mask contour color -'
        ' accepts any value from PIL.ImageColor.getcolor')
    # --- layout geometry ---
    parser.add_argument(
        '--relative_scaling', type=float, default=0,
        metavar='rs', help=' scaling of words by frequency (0 - 1)')
    parser.add_argument(
        '--margin', type=int, default=2,
        metavar='width', help='spacing to leave around words')
    parser.add_argument(
        '--width', type=int, default=400,
        metavar='width', help='define output image width')
    parser.add_argument(
        '--height', type=int, default=200,
        metavar='height', help='define output image height')
    parser.add_argument(
        '--color', metavar='color',
        help='use given color as coloring for the image -'
        ' accepts any value from PIL.ImageColor.getcolor')
    parser.add_argument(
        '--background', metavar='color', default='black', type=str,
        dest='background_color',
        help='use given color as background color for the image -'
        ' accepts any value from PIL.ImageColor.getcolor')
    # --- word selection behaviour ---
    parser.add_argument(
        '--no_collocations', action='store_false', dest='collocations',
        help='do not add collocations (bigrams) to word cloud '
        '(default: add unigrams and bigrams)')
    parser.add_argument(
        '--include_numbers',
        action='store_true',
        dest='include_numbers',
        help='include numbers in wordcloud?')
    parser.add_argument(
        '--min_word_length',
        type=int,
        default=0,
        metavar='min_word_length',
        dest='min_word_length',
        help='only include words with more than X letters')
    # --- font sizing and rendering ---
    parser.add_argument(
        '--prefer_horizontal',
        type=float, default=.9, metavar='ratio',
        help='ratio of times to try horizontal fitting as opposed to vertical')
    parser.add_argument(
        '--scale',
        type=float, default=1, metavar='scale',
        help='scaling between computation and drawing')
    parser.add_argument(
        '--colormap',
        type=str, default='viridis', metavar='map',
        help='matplotlib colormap name')
    parser.add_argument(
        '--mode',
        type=str, default='RGB', metavar='mode',
        help='use RGB or RGBA for transparent background')
    parser.add_argument(
        '--max_words',
        type=int, default=200, metavar='N',
        help='maximum number of words')
    parser.add_argument(
        '--min_font_size',
        type=int, default=4, metavar='size',
        help='smallest font size to use')
    parser.add_argument(
        '--max_font_size',
        type=int, default=None, metavar='size',
        help='maximum font size for the largest word')
    parser.add_argument(
        '--font_step',
        type=int, default=1, metavar='step',
        help='step size for the font')
    parser.add_argument(
        '--random_state',
        type=int, default=None, metavar='seed',
        help='random seed')
    parser.add_argument(
        '--no_normalize_plurals',
        action='store_false',
        dest='normalize_plurals',
        help='whether to remove trailing \'s\' from words')
    parser.add_argument(
        '--repeat',
        action='store_true',
        dest='repeat',
        help='whether to repeat words and phrases')
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {version}'.format(version=__version__))
    return parser
class Image:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
# Format identifier (e.g. "PNG"); presumably overridden by file-format
# plugin subclasses — None for in-memory images.  TODO confirm.
format = None
# Human-readable description of the format; set alongside ``format``.
format_description = None
# Whether load() may close an exclusively-owned file pointer afterwards.
_close_exclusive_fp_after_loading = True
def __init__(self):
    """Initialize an empty image shell; callers should use the module's
    factory functions (open/new/frombytes) rather than this constructor."""
    # FIXME: take "new" parameters / other image?
    # FIXME: turn mode and size into delegating properties?
    self.im = None           # underlying core image object (None until allocated)
    self.mode = ""           # pixel mode string, e.g. "RGB", "L", "P"
    self._size = (0, 0)      # (width, height) tuple
    self.palette = None      # palette object for "P"/"PA" modes
    self.info = {}           # miscellaneous metadata dictionary
    self._category = 0       # deprecated category value (see __getattr__)
    self.readonly = 0        # non-zero while pixel storage may be shared
    self.pyaccess = None     # cffi-based pixel access object, if enabled
    self._exif = None
def __getattr__(self, name):
    """Serve the deprecated ``category`` attribute with a warning;
    raise AttributeError for anything else."""
    if name == "category":
        # NOTE(review): ``deprecate`` is a helper not visible in this
        # extract (upstream: PIL._deprecate.deprecate).
        deprecate("Image categories", 10, "is_animated", plural=True)
        return self._category
    raise AttributeError(name)
def width(self):
    # Image width in pixels.
    # NOTE(review): upstream this is a @property — other methods read
    # ``self.width`` without calling it (see tobytes) — the decorator
    # appears lost in this extract; confirm.
    return self.size[0]
def height(self):
    # Image height in pixels.
    # NOTE(review): upstream a @property; decorator appears lost here.
    return self.size[1]
def size(self):
    # (width, height) tuple backing the width/height accessors.
    # NOTE(review): upstream a @property; decorator appears lost here.
    return self._size
def _new(self, im):
    """Wrap core image *im* in a fresh Image that inherits this image's
    palette (for P/PA modes) and a shallow copy of its metadata."""
    new = Image()
    new.im = im
    new.mode = im.mode
    new._size = im.size
    if im.mode in ("P", "PA"):
        if self.palette:
            new.palette = self.palette.copy()
        else:
            # No palette on the source: start the copy with an empty one.
            from . import ImagePalette
            new.palette = ImagePalette.ImagePalette()
    new.info = self.info.copy()
    return new
# Context manager support
def __enter__(self):
    """Enter a ``with`` block; the image itself is the context object."""
    return self
def __exit__(self, *args):
    """Close the backing file on ``with``-block exit, but only when this
    Image opened it exclusively (``_exclusive_fp``)."""
    if hasattr(self, "fp") and getattr(self, "_exclusive_fp", False):
        if getattr(self, "_fp", False):
            if self._fp != self.fp:
                self._fp.close()
            # Leave a descriptive error object behind for later access.
            # NOTE(review): ``DeferredError`` is not visible in this
            # extract (upstream: PIL._util.DeferredError).
            self._fp = DeferredError(ValueError("Operation on closed image"))
        if self.fp:
            self.fp.close()
        self.fp = None
def close(self):
    """
    Closes the file pointer, if possible.

    This operation will destroy the image core and release its memory.
    The image data will be unusable afterward.

    This function is required to close images that have multiple frames or
    have not had their file read and closed by the
    :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
    more information.
    """
    try:
        if getattr(self, "_fp", False):
            if self._fp != self.fp:
                self._fp.close()
            self._fp = DeferredError(ValueError("Operation on closed image"))
        if self.fp:
            self.fp.close()
        self.fp = None
    except Exception as msg:
        # Best-effort close: never let a close failure propagate.
        logger.debug("Error closing: %s", msg)

    if getattr(self, "map", None):
        self.map = None  # drop any memory-map reference

    # Instead of simply setting to None, we're setting up a
    # deferred error that will better explain that the core image
    # object is gone.
    self.im = DeferredError(ValueError("Operation on closed image"))
def _copy(self):
    # Duplicate the core image so this instance owns writable pixel data.
    self.load()
    self.im = self.im.copy()
    self.pyaccess = None  # a stale accessor would point at the old core image
    self.readonly = 0
def _ensure_mutable(self):
    # Readonly images must be detached via _copy() before mutation;
    # writable ones only need their pixel data loaded.
    if self.readonly:
        self._copy()
    else:
        self.load()
def _dump(self, file=None, format=None, **options):
    """Debug helper: write the image to *file* (or a fresh temp file) and
    return the filename actually used."""
    suffix = ""
    if format:
        suffix = "." + format

    if not file:
        f, filename = tempfile.mkstemp(suffix)
        os.close(f)  # mkstemp opens the fd; only the path is needed here
    else:
        filename = file
        if not filename.endswith(suffix):
            filename = filename + suffix

    self.load()

    if not format or format == "PPM":
        self.im.save_ppm(filename)
    else:
        self.save(filename, format, **options)

    return filename
def __eq__(self, other):
    # Deep equality: exact class, mode, size, metadata, palette and raw
    # pixel bytes must all match.  Comparatively expensive — tobytes()
    # serializes the whole image.
    return (
        self.__class__ is other.__class__
        and self.mode == other.mode
        and self.size == other.size
        and self.info == other.info
        and self._category == other._category
        and self.getpalette() == other.getpalette()
        and self.tobytes() == other.tobytes()
    )
def __repr__(self):
    # e.g. <PIL.Image.Image image mode=RGB size=640x480 at 0x7F12AB34>
    return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
        self.__class__.__module__,
        self.__class__.__name__,
        self.mode,
        self.size[0],
        self.size[1],
        id(self),
    )
def _repr_pretty_(self, p, cycle):
    """IPython plain text display support"""

    # Same as __repr__ but without unpredictable id(self),
    # to keep Jupyter notebook `text/plain` output stable.
    p.text(
        "<%s.%s image mode=%s size=%dx%d>"
        % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.mode,
            self.size[0],
            self.size[1],
        )
    )
def _repr_png_(self):
    """iPython display hook support

    :returns: png version of the image as bytes
    """
    b = io.BytesIO()
    try:
        self.save(b, "PNG")
    except Exception as e:
        # Re-raise with a display-oriented message; chains the cause.
        msg = "Could not save to PNG for display"
        raise ValueError(msg) from e
    return b.getvalue()
def __array_interface__(self):
    # numpy array interface support
    # NOTE(review): upstream this is a @property (numpy reads the attribute
    # without calling it); the decorator appears lost in this extract.
    new = {"version": 3}
    try:
        if self.mode == "1":
            # Binary images need to be extended from bits to bytes
            # See: https://github.com/python-pillow/Pillow/issues/350
            new["data"] = self.tobytes("raw", "L")
        else:
            new["data"] = self.tobytes()
    except Exception as e:
        if not isinstance(e, (MemoryError, RecursionError)):
            try:
                import numpy
                from packaging.version import parse as parse_version
            except ImportError:
                pass
            else:
                if parse_version(numpy.__version__) < parse_version("1.23"):
                    # NOTE(review): warning with the exception *instance*
                    # rather than a message string — confirm intended.
                    warnings.warn(e)
        raise
    new["shape"], new["typestr"] = _conv_type_shape(self)
    return new
def __getstate__(self):
    # Pickle support: capture metadata, mode, size, palette and raw pixels.
    return [self.info, self.mode, self.size, self.getpalette(), self.tobytes()]
def __setstate__(self, state):
    # Pickle support: rebuild the core image from the __getstate__ list.
    Image.__init__(self)
    info, mode, size, palette, data = state
    self.info = info
    self.mode = mode
    self._size = size
    self.im = core.new(mode, size)
    if mode in ("L", "LA", "P", "PA") and palette:
        self.putpalette(palette)
    self.frombytes(data)
def tobytes(self, encoder_name="raw", *args):
    """
    Return image as a bytes object.

    .. warning::

        This method returns the raw image data from the internal
        storage. For compressed image data (e.g. PNG, JPEG) use
        :meth:`~.save`, with a BytesIO parameter for in-memory
        data.

    :param encoder_name: What encoder to use. The default is to
                         use the standard "raw" encoder.
                         A list of C encoders can be seen under
                         codecs section of the function array in
                         :file:`_imaging.c`. Python encoders are
                         registered within the relevant plugins.
    :param args: Extra arguments to the encoder.
    :returns: A :py:class:`bytes` object.
    """
    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]

    # default: the raw encoder takes the mode string as its argument
    if encoder_name == "raw" and args == ():
        args = self.mode

    self.load()

    if self.width == 0 or self.height == 0:
        return b""

    # unpack data
    e = _getencoder(self.mode, encoder_name, args)
    e.setimage(self.im)

    bufsize = max(65536, self.size[0] * 4)  # see RawEncode.c

    output = []
    while True:
        bytes_consumed, errcode, data = e.encode(bufsize)
        output.append(data)
        if errcode:
            # Non-zero terminates: positive means done, negative an error.
            break
    if errcode < 0:
        msg = f"encoder error {errcode} in tobytes"
        raise RuntimeError(msg)

    return b"".join(output)
def tobitmap(self, name="image"):
    """
    Returns the image converted to an X11 bitmap.

    .. note:: This method only works for mode "1" images.

    :param name: The name prefix to use for the bitmap variables.
    :returns: A string containing an X11 bitmap.
    :raises ValueError: If the mode is not "1"
    """
    self.load()
    if self.mode != "1":
        msg = "not a bitmap"
        raise ValueError(msg)
    data = self.tobytes("xbm")
    # Assemble C-source-style XBM output: #define lines plus the data array.
    return b"".join(
        [
            f"#define {name}_width {self.size[0]}\n".encode("ascii"),
            f"#define {name}_height {self.size[1]}\n".encode("ascii"),
            f"static char {name}_bits[] = {{\n".encode("ascii"),
            data,
            b"};",
        ]
    )
def frombytes(self, data, decoder_name="raw", *args):
    """
    Loads this image with pixel data from a bytes object.

    This method is similar to the :py:func:`~PIL.Image.frombytes` function,
    but loads data into this image instead of creating a new image object.
    """
    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]

    # default format
    if decoder_name == "raw" and args == ():
        args = self.mode

    # unpack data
    d = _getdecoder(self.mode, decoder_name, args)
    d.setimage(self.im)
    s = d.decode(data)

    # Decoder status: a non-negative consumed count here means the buffer
    # ran out before the image was complete.
    if s[0] >= 0:
        msg = "not enough image data"
        raise ValueError(msg)
    if s[1] != 0:
        msg = "cannot decode image data"
        raise ValueError(msg)
def load(self):
    """
    Allocates storage for the image and loads the pixel data. In
    normal cases, you don't need to call this method, since the
    Image class automatically loads an opened image when it is
    accessed for the first time.

    If the file associated with the image was opened by Pillow, then this
    method will close it. The exception to this is if the image has
    multiple frames, in which case the file will be left open for seek
    operations. See :ref:`file-handling` for more information.

    :returns: An image access object.
    :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
    """
    if self.im is not None and self.palette and self.palette.dirty:
        # realize palette: push pending palette edits down into the core image
        mode, arr = self.palette.getdata()
        self.im.putpalette(mode, arr)
        self.palette.dirty = 0
        self.palette.rawmode = None
        if "transparency" in self.info and mode in ("LA", "PA"):
            if isinstance(self.info["transparency"], int):
                self.im.putpalettealpha(self.info["transparency"], 0)
            else:
                self.im.putpalettealphas(self.info["transparency"])
            self.palette.mode = "RGBA"
        else:
            palette_mode = "RGBA" if mode.startswith("RGBA") else "RGB"
            self.palette.mode = palette_mode
            self.palette.palette = self.im.getpalette(palette_mode, palette_mode)

    if self.im is not None:
        # Prefer the cffi-based accessor when available and enabled.
        # NOTE(review): ``cffi`` / ``USE_CFFI_ACCESS`` are module globals not
        # visible in this extract.
        if cffi and USE_CFFI_ACCESS:
            if self.pyaccess:
                return self.pyaccess
            from . import PyAccess

            self.pyaccess = PyAccess.new(self, self.readonly)
            if self.pyaccess:
                return self.pyaccess
        return self.im.pixel_access(self.readonly)
def verify(self):
    """
    Verifies the contents of a file. For data read from a file, this
    method attempts to determine if the file is broken, without
    actually decoding the image data. If this method finds any
    problems, it raises suitable exceptions. If you need to load
    the image after using this method, you must reopen the image
    file.
    """
    # Intentionally a no-op on the base class; format plugins override it.
    pass
def convert(
self, mode=None, matrix=None, dither=None, palette=Palette.WEB, colors=256
):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK". The ``matrix`` argument only supports "L"
and "RGB".
When translating a color image to greyscale (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is ``None``, all values larger than 127 are set to 255 (white),
all other values to 0 (black). To use other thresholds, use the
:py:meth:`~PIL.Image.Image.point` method.
When converting from "RGBA" to "P" without a ``matrix`` argument,
this passes the operation to :py:meth:`~PIL.Image.Image.quantize`,
and ``dither`` and ``palette`` are ignored.
When converting from "PA", if an "RGBA" palette is present, the alpha
channel from the image will be used instead of the values from the palette.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
(default). Note that this is not used when ``matrix`` is supplied.
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are :data:`Palette.WEB` or
:data:`Palette.ADAPTIVE`.
:param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
palette. Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
has_transparency = self.info.get("transparency") is not None
if not mode and self.mode == "P":
# determine default mode
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
if mode == "RGB" and has_transparency:
mode = "RGBA"
if not mode or (mode == self.mode and not matrix):
return self.copy()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
msg = "illegal conversion"
raise ValueError(msg)
im = self.im.convert_matrix(mode, matrix)
new = self._new(im)
if has_transparency and self.im.bands == 3:
transparency = new.info["transparency"]
def convert_transparency(m, v):
v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5
return max(0, min(255, int(v)))
if mode == "L":
transparency = convert_transparency(matrix, transparency)
elif len(mode) == 3:
transparency = tuple(
convert_transparency(matrix[i * 4 : i * 4 + 4], transparency)
for i in range(0, len(transparency))
)
new.info["transparency"] = transparency
return new
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if has_transparency:
if (self.mode in ("1", "L", "I") and mode in ("LA", "RGBA")) or (
self.mode == "RGB" and mode == "RGBA"
):
# Use transparent conversion to promote from transparent
# color to an alpha channel.
new_im = self._new(
self.im.convert_transparent(mode, self.info["transparency"])
)
del new_im.info["transparency"]
return new_im
elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
t = self.info["transparency"]
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
warnings.warn(
"Palette images with Transparency expressed in bytes should be "
"converted to RGBA images"
)
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1, 1)))
if self.mode == "P":
trns_im.putpalette(self.palette)
if isinstance(t, tuple):
err = "Couldn't allocate a palette color for transparency"
try:
t = trns_im.palette.getcolor(t, self)
except ValueError as e:
if str(e) == "cannot allocate more than 256 colors":
# If all 256 colors are in use,
# then there is no need for transparency
t = None
else:
raise ValueError(err) from e
if t is None:
trns = None
else:
trns_im.putpixel((0, 0), t)
if mode in ("L", "RGB"):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert("RGB")
trns = trns_im.getpixel((0, 0))
elif self.mode == "P" and mode in ("LA", "PA", "RGBA"):
t = self.info["transparency"]
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
msg = "Transparency for P mode should be bytes or int"
raise ValueError(msg)
if mode == "P" and palette == Palette.ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from . import ImagePalette
new.palette = ImagePalette.ImagePalette("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del new.info["transparency"]
if trns is not None:
try:
new.info["transparency"] = new.palette.getcolor(trns, new)
except Exception:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del new.info["transparency"]
warnings.warn("Couldn't allocate palette entry for transparency")
return new
if "LAB" in (self.mode, mode):
other_mode = mode if self.mode == "LAB" else self.mode
if other_mode in ("RGB", "RGBA", "RGBX"):
from . import ImageCms
srgb = ImageCms.createProfile("sRGB")
lab = ImageCms.createProfile("LAB")
profiles = [lab, srgb] if self.mode == "LAB" else [srgb, lab]
transform = ImageCms.buildTransform(
profiles[0], profiles[1], self.mode, mode
)
return transform.apply(self)
# colorspace conversion
if dither is None:
dither = Dither.FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
modebase = getmodebase(self.mode)
if modebase == self.mode:
raise
im = self.im.convert(modebase)
im = im.convert(mode, dither)
except KeyError as e:
msg = "illegal conversion"
raise ValueError(msg) from e
new_im = self._new(im)
if mode == "P" and palette != Palette.ADAPTIVE:
from . import ImagePalette
new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del new_im.info["transparency"]
if trns is not None:
if new_im.mode == "P":
try:
new_im.info["transparency"] = new_im.palette.getcolor(trns, new_im)
except ValueError as e:
del new_im.info["transparency"]
if str(e) != "cannot allocate more than 256 colors":
# If all 256 colors are in use,
# then there is no need for transparency
warnings.warn(
"Couldn't allocate palette entry for transparency"
)
else:
new_im.info["transparency"] = trns
return new_im
    def quantize(
        self,
        colors=256,
        method=None,
        kmeans=0,
        palette=None,
        dither=Dither.FLOYDSTEINBERG,
    ):
        """
        Convert the image to 'P' mode with the specified number
        of colors.

        :param colors: The desired number of colors, <= 256
        :param method: :data:`Quantize.MEDIANCUT` (median cut),
                       :data:`Quantize.MAXCOVERAGE` (maximum coverage),
                       :data:`Quantize.FASTOCTREE` (fast octree),
                       :data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support
                       using :py:func:`PIL.features.check_feature` with
                       ``feature="libimagequant"``).
                       By default, :data:`Quantize.MEDIANCUT` will be used.
                       The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`
                       and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so
                       :data:`Quantize.FASTOCTREE` is used by default instead.
        :param kmeans: Integer
        :param palette: Quantize to the palette of given
                        :py:class:`PIL.Image.Image`.
        :param dither: Dithering method, used when converting from
           mode "RGB" to "P" or from "RGB" or "L" to "1".
           Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
           (default).
        :returns: A new image
        """
        self.load()
        if method is None:
            # defaults:
            method = Quantize.MEDIANCUT
            if self.mode == "RGBA":
                # MEDIANCUT/MAXCOVERAGE cannot handle an alpha band.
                method = Quantize.FASTOCTREE
        if self.mode == "RGBA" and method not in (
            Quantize.FASTOCTREE,
            Quantize.LIBIMAGEQUANT,
        ):
            # Caller specified an invalid mode.
            msg = (
                "Fast Octree (method == 2) and libimagequant (method == 3) "
                "are the only valid methods for quantizing RGBA images"
            )
            raise ValueError(msg)
        if palette:
            # use palette from reference image
            palette.load()
            if palette.mode != "P":
                msg = "bad mode for palette image"
                raise ValueError(msg)
            if self.mode != "RGB" and self.mode != "L":
                msg = "only RGB or L mode images can be quantized to a palette"
                raise ValueError(msg)
            # Convert through the core with the reference palette attached.
            im = self.im.convert("P", dither, palette.im)
            new_im = self._new(im)
            new_im.palette = palette.palette.copy()
            return new_im
        im = self._new(self.im.quantize(colors, method, kmeans))
        from . import ImagePalette
        mode = im.im.getpalettemode()
        # Trim the raw palette to the number of colors actually requested.
        palette = im.im.getpalette(mode, mode)[: colors * len(mode)]
        im.palette = ImagePalette.ImagePalette(mode, palette)
        return im
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
return self._new(self.im.copy())
__copy__ = copy
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`.
Note: Prior to Pillow 3.4.0, this was a lazy operation.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if box is None:
return self.copy()
if box[2] < box[0]:
msg = "Coordinate 'right' is less than 'left'"
raise ValueError(msg)
elif box[3] < box[1]:
msg = "Coordinate 'lower' is less than 'upper'"
raise ValueError(msg)
self.load()
return self._new(self._crop(self.im, box))
def _crop(self, im, box):
"""
Returns a rectangular region from the core image object im.
This is equivalent to calling im.crop((x0, y0, x1, y1)), but
includes additional sanity checks.
:param im: a core image object
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:returns: A core image object.
"""
x0, y0, x1, y1 = map(int, map(round, box))
absolute_values = (abs(x1 - x0), abs(y1 - y0))
_decompression_bomb_check(absolute_values)
return im.crop((x0, y0, x1, y1))
    def draft(self, mode, size):
        """
        Configures the image file loader so it returns a version of the
        image that as closely as possible matches the given mode and
        size. For example, you can use this method to convert a color
        JPEG to greyscale while loading it.
        If any changes are made, returns a tuple with the chosen ``mode`` and
        ``box`` with coordinates of the original image within the altered one.
        Note that this method modifies the :py:class:`~PIL.Image.Image` object
        in place. If the image has already been loaded, this method has no
        effect.
        Note: This method is not implemented for most images. It is
        currently implemented only for JPEG and MPO images.
        :param mode: The requested mode.
        :param size: The requested size in pixels, as a 2-tuple:
           (width, height).
        """
        # Base implementation is a deliberate no-op (returns None); format
        # plugins such as JPEG/MPO override it with a real implementation.
        pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object."""
from . import ImageFilter
self.load()
if isinstance(filter, Callable):
filter = filter()
if not hasattr(filter, "filter"):
msg = "filter argument should be ImageFilter.Filter instance or class"
raise TypeError(msg)
multiband = isinstance(filter, ImageFilter.MultibandFilter)
if self.im.bands == 1 or multiband:
return self._new(filter.filter(self.im))
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, ``getbands`` on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. See
:ref:`coordinate-system`. If the image is completely empty, this
method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
The colors will be in the image's mode. For example, an RGB image will
return a tuple of (red, green, blue) color values, and a P image will
return the index of the color in the palette.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band=None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use ``list(im.getdata())``.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def _getxmp(self, xmp_tags):
def get_name(tag):
return tag.split("}")[1]
def get_value(element):
value = {get_name(k): v for k, v in element.attrib.items()}
children = list(element)
if children:
for child in children:
name = get_name(child.tag)
child_value = get_value(child)
if name in value:
if not isinstance(value[name], list):
value[name] = [value[name]]
value[name].append(child_value)
else:
value[name] = child_value
elif value:
if element.text:
value["text"] = element.text
else:
return element.text
return value
if ElementTree is None:
warnings.warn("XMP data cannot be read without defusedxml dependency")
return {}
else:
root = ElementTree.fromstring(xmp_tags)
return {get_name(root.tag): get_value(root)}
    def getexif(self):
        """
        Gets EXIF data from the image.

        :returns: an :py:class:`~PIL.Image.Exif` object.
        """
        # Lazily build and cache an Exif instance on first access.
        if self._exif is None:
            self._exif = Exif()
            self._exif._loaded = False
        elif self._exif._loaded:
            # Cache is current; reuse it.
            return self._exif
        self._exif._loaded = True
        exif_info = self.info.get("exif")
        if exif_info is None:
            if "Raw profile type exif" in self.info:
                # PNG raw profile: hex-encoded payload; the first three lines
                # are header/length metadata, the remainder is the hex data.
                exif_info = bytes.fromhex(
                    "".join(self.info["Raw profile type exif"].split("\n")[3:])
                )
            elif hasattr(self, "tag_v2"):
                # TIFF-based images: read EXIF straight from the tag structure.
                self._exif.bigtiff = self.tag_v2._bigtiff
                self._exif.endian = self.tag_v2._endian
                self._exif.load_from_fp(self.fp, self.tag_v2._offset)
        if exif_info is not None:
            self._exif.load(exif_info)
        # XMP tags
        # Fall back to XMP for the orientation tag (0x0112) when EXIF lacks it.
        if 0x0112 not in self._exif:
            xmp_tags = self.info.get("XML:com.adobe.xmp")
            if xmp_tags:
                match = re.search(r'tiff:Orientation(="|>)([0-9])', xmp_tags)
                if match:
                    self._exif[0x0112] = int(match[2])
        return self._exif
def _reload_exif(self):
if self._exif is None or not self._exif._loaded:
return
self._exif._loaded = False
self.getexif()
    def get_child_images(self):
        # Collect embedded sub-images (EXIF SubIFDs plus the IFD1 thumbnail)
        # as fully-loaded Image objects, restoring the stream position after.
        child_images = []
        exif = self.getexif()
        ifds = []
        if ExifTags.Base.SubIFDs in exif:
            subifd_offsets = exif[ExifTags.Base.SubIFDs]
            if subifd_offsets:
                # A single offset may be stored bare rather than as a tuple.
                if not isinstance(subifd_offsets, tuple):
                    subifd_offsets = (subifd_offsets,)
                for subifd_offset in subifd_offsets:
                    ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
        ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
        # Tag 513 = JPEGInterchangeFormat: offset of an embedded thumbnail.
        if ifd1 and ifd1.get(513):
            ifds.append((ifd1, exif._info.next))
        offset = None
        for ifd, ifd_offset in ifds:
            current_offset = self.fp.tell()
            if offset is None:
                # Remember the original position so it can be restored at the end.
                offset = current_offset
            fp = self.fp
            thumbnail_offset = ifd.get(513)
            if thumbnail_offset is not None:
                try:
                    # Some formats store offsets relative to the EXIF block —
                    # TODO confirm which attribute owners set _exif_offset.
                    thumbnail_offset += self._exif_offset
                except AttributeError:
                    pass
                self.fp.seek(thumbnail_offset)
                # Tag 514 = JPEGInterchangeFormatLength: thumbnail byte count.
                data = self.fp.read(ifd.get(514))
                fp = io.BytesIO(data)
            with open(fp) as im:
                if thumbnail_offset is None:
                    # Not a thumbnail: point the child at the sub-IFD frame.
                    im._frame_pos = [ifd_offset]
                    im._seek(0)
                # Load eagerly while the (possibly shared) fp is positioned.
                im.load()
                child_images.append(im)
        if offset is not None:
            self.fp.seek(offset)
        return child_images
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self, rawmode="RGB"):
"""
Returns the image palette as a list.
:param rawmode: The mode in which to return the palette. ``None`` will
return the palette in its current mode.
.. versionadded:: 9.1.0
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
mode = self.im.getpalettemode()
except ValueError:
return None # no palette
if rawmode is None:
rawmode = mode
return list(self.im.getpalette(mode, rawmode))
    def apply_transparency(self):
        """
        If a P mode image has a "transparency" key in the info dictionary,
        remove the key and instead apply the transparency to the palette.
        Otherwise, the image is unchanged.
        """
        if self.mode != "P" or "transparency" not in self.info:
            return
        from . import ImagePalette
        # Work on the palette expanded to RGBA so alpha can be edited in place.
        palette = self.getpalette("RGBA")
        transparency = self.info["transparency"]
        if isinstance(transparency, bytes):
            # One alpha byte per palette index.
            for i, alpha in enumerate(transparency):
                palette[i * 4 + 3] = alpha
        else:
            # A single palette index marked fully transparent.
            palette[transparency * 4 + 3] = 0
        self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette))
        # Mark dirty so the core palette is rebuilt on next load.
        self.palette.dirty = 1
        del self.info["transparency"]
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y). See
:ref:`coordinate-system`.
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return list(x), list(y)
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as a
list of pixel counts, one for each pixel value in the source
image. Counts are grouped into 256 bins for each band, even if
the image has more than 8 bits per band. If the image has more
than one band, the histograms for all bands are concatenated (for
example, the histogram for an "RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
def entropy(self, mask=None, extrema=None):
"""
Calculates and returns the entropy for the image.
A bilevel image (mode "1") is treated as a greyscale ("L")
image by this method.
If a mask is provided, the method employs the histogram for
those parts of the image where the mask image is non-zero.
The mask image must have the same size as the image, and be
either a bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A float value representing the image entropy
"""
self.load()
if mask:
mask.load()
return self.im.entropy((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.entropy(extrema)
return self.im.entropy()
    def paste(self, im, box=None, mask=None):
        """
        Pastes another image into this image. The box argument is either
        a 2-tuple giving the upper left corner, a 4-tuple defining the
        left, upper, right, and lower pixel coordinate, or None (same as
        (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
        of the pasted image must match the size of the region.

        If the modes don't match, the pasted image is converted to the mode of
        this image (see the :py:meth:`~PIL.Image.Image.convert` method for
        details).

        Instead of an image, the source can be an integer or tuple
        containing pixel values.  The method then fills the region
        with the given color.  When creating RGB images, you can
        also use color strings as supported by the ImageColor module.

        If a mask is given, this method updates only the regions
        indicated by the mask. You can use either "1", "L", "LA", "RGBA"
        or "RGBa" images (if present, the alpha band is used as mask).
        Where the mask is 255, the given image is copied as is. Where
        the mask is 0, the current value is preserved. Intermediate
        values will mix the two images together, including their alpha
        channels if they have them.

        See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
        combine images with respect to their alpha channels.

        :param im: Source image or pixel value (integer or tuple).
        :param box: An optional 4-tuple giving the region to paste into.
           If a 2-tuple is used instead, it's treated as the upper left
           corner. If omitted or None, the source is pasted into the
           upper left corner.

           If an image is given as the second argument and there is no
           third, the box defaults to (0, 0), and the second argument
           is interpreted as a mask image.
        :param mask: An optional mask image.
        """
        if isImageType(box) and mask is None:
            # abbreviated paste(im, mask) syntax
            mask = box
            box = None
        if box is None:
            box = (0, 0)
        if len(box) == 2:
            # upper left corner given; get size from image or mask
            if isImageType(im):
                size = im.size
            elif isImageType(mask):
                size = mask.size
            else:
                # FIXME: use self.size here?
                msg = "cannot determine region size; use 4-item box"
                raise ValueError(msg)
            # Expand the corner into a full (left, upper, right, lower) box.
            box += (box[0] + size[0], box[1] + size[1])
        if isinstance(im, str):
            # Color-name source: resolve to a pixel value in our mode.
            from . import ImageColor
            im = ImageColor.getcolor(im, self.mode)
        elif isImageType(im):
            im.load()
            if self.mode != im.mode:
                if self.mode != "RGB" or im.mode not in ("LA", "RGBA", "RGBa"):
                    # should use an adapter for this!
                    im = im.convert(self.mode)
            # Hand the raw core image to the core paste.
            im = im.im
        # Pasting mutates pixels, so un-share/copy-on-write first.
        self._ensure_mutable()
        if mask:
            mask.load()
            self.im.paste(im, box, mask.im)
        else:
            self.im.paste(im, box)
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
"""'In-place' analog of Image.alpha_composite. Composites an image
onto this image.
:param im: image to composite over this one
:param dest: Optional 2 tuple (left, top) specifying the upper
left corner in this (destination) image.
:param source: Optional 2 (left, top) tuple for the upper left
corner in the overlay source image, or 4 tuple (left, top, right,
bottom) for the bounds of the source rectangle
Performance Note: Not currently implemented in-place in the core layer.
"""
if not isinstance(source, (list, tuple)):
msg = "Source must be a tuple"
raise ValueError(msg)
if not isinstance(dest, (list, tuple)):
msg = "Destination must be a tuple"
raise ValueError(msg)
if not len(source) in (2, 4):
msg = "Source must be a 2 or 4-tuple"
raise ValueError(msg)
if not len(dest) == 2:
msg = "Destination must be a 2-tuple"
raise ValueError(msg)
if min(source) < 0:
msg = "Source must be non-negative"
raise ValueError(msg)
if len(source) == 2:
source = source + im.size
# over image, crop if it's not the whole thing.
if source == (0, 0) + im.size:
overlay = im
else:
overlay = im.crop(source)
# target for the paste
box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)
# destination image. don't copy if we're using the whole image.
if box == (0, 0) + self.size:
background = self
else:
background = self.crop(box)
result = alpha_composite(background, overlay)
self.paste(result, box)
    def point(self, lut, mode=None):
        """
        Maps this image through a lookup table or function.

        :param lut: A lookup table, containing 256 (or 65536 if
           self.mode=="I" and mode == "L") values per band in the
           image.  A function can be used instead, it should take a
           single argument. The function is called once for each
           possible pixel value, and the resulting table is applied to
           all bands of the image.

           It may also be an :py:class:`~PIL.Image.ImagePointHandler`
           object::

               class Example(Image.ImagePointHandler):
                 def point(self, data):
                   # Return result
        :param mode: Output mode (default is same as input). In the
           current version, this can only be used if the source image
           has mode "L" or "P", and the output has mode "1" or the
           source image mode is "I" and the output mode is "L".
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        self.load()
        if isinstance(lut, ImagePointHandler):
            # Handler object: delegate the whole operation.
            return lut.point(self)
        if callable(lut):
            # if it isn't a list, it should be a function
            if self.mode in ("I", "I;16", "F"):
                # check if the function can be used with point_transform
                # UNDONE wiredfool -- I think this prevents us from ever doing
                # a gamma function point transform on > 8bit images.
                scale, offset = _getscaleoffset(lut)
                return self._new(self.im.point_transform(scale, offset))
            # for other modes, convert the function to a table
            lut = [lut(i) for i in range(256)] * self.im.bands
        if self.mode == "F":
            # FIXME: _imaging returns a confusing error message for this case
            msg = "point operation not supported for this mode"
            raise ValueError(msg)
        if mode != "F":
            # Non-float outputs require integer table entries.
            lut = [round(i) for i in lut]
        return self._new(self.im.point(lut, mode))
    def putalpha(self, alpha):
        """
        Adds or replaces the alpha layer in this image.  If the image
        does not have an alpha layer, it's converted to "LA" or "RGBA".
        The new layer must be either "L" or "1".

        :param alpha: The new alpha layer.  This can either be an "L" or "1"
           image having the same size as this image, or an integer or
           other color value.
        """
        self._ensure_mutable()
        if self.mode not in ("LA", "PA", "RGBA"):
            # attempt to promote self to a matching alpha mode
            try:
                mode = getmodebase(self.mode) + "A"
                try:
                    # Fast path: flip the mode flag in place when possible.
                    self.im.setmode(mode)
                except (AttributeError, ValueError) as e:
                    # do things the hard way
                    im = self.im.convert(mode)
                    if im.mode not in ("LA", "PA", "RGBA"):
                        raise ValueError from e  # sanity check
                    self.im = im
                    self.pyaccess = None
                self.mode = self.im.mode
            except KeyError as e:
                msg = "illegal image mode"
                raise ValueError(msg) from e
        # Index of the alpha band: 1 for LA/PA, 3 for RGBA.
        if self.mode in ("LA", "PA"):
            band = 1
        else:
            band = 3
        if isImageType(alpha):
            # alpha layer
            if alpha.mode not in ("1", "L"):
                msg = "illegal image mode"
                raise ValueError(msg)
            alpha.load()
            if alpha.mode == "1":
                alpha = alpha.convert("L")
        else:
            # constant alpha
            try:
                self.im.fillband(band, alpha)
            except (AttributeError, ValueError):
                # do things the hard way
                alpha = new("L", self.size, alpha)
            else:
                # fillband succeeded; no separate band paste needed.
                return
        self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data from a flattened sequence object into the image. The
values should start at the upper left corner (0, 0), continue to the
end of the line, followed directly by the first value of the second
line, and so on. Data will be read until either the image or the
sequence ends. The scale and offset values are used to adjust the
sequence values: **pixel = value*scale + offset**.
:param data: A flattened sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self._ensure_mutable()
self.im.putdata(data, scale, offset)
    def putpalette(self, data, rawmode="RGB"):
        """
        Attaches a palette to this image.  The image must be a "P", "PA", "L"
        or "LA" image.

        The palette sequence must contain at most 256 colors, made up of one
        integer value for each channel in the raw mode.
        For example, if the raw mode is "RGB", then it can contain at most 768
        values, made up of red, green and blue values for the corresponding pixel
        index in the 256 colors.
        If the raw mode is "RGBA", then it can contain at most 1024 values,
        containing red, green, blue and alpha values.

        Alternatively, an 8-bit string may be used instead of an integer sequence.

        :param data: A palette sequence (either a list or a string).
        :param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
           that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
        """
        from . import ImagePalette
        if self.mode not in ("L", "LA", "P", "PA"):
            msg = "illegal image mode"
            raise ValueError(msg)
        if isinstance(data, ImagePalette.ImagePalette):
            # Re-wrap as a raw palette so it is re-decoded on load.
            palette = ImagePalette.raw(data.rawmode, data.palette)
        else:
            if not isinstance(data, bytes):
                # Accept any integer sequence (or an 8-bit string).
                data = bytes(data)
            palette = ImagePalette.raw(rawmode, data)
        # Attaching a palette turns L/LA into P/PA, preserving alpha.
        self.mode = "PA" if "A" in self.mode else "P"
        self.palette = palette
        self.palette.mode = "RGB"
        self.load()  # install new palette
    def putpixel(self, xy, value):
        """
        Modifies the pixel at the given position. The color is given as
        a single numerical value for single-band images, and a tuple for
        multi-band images. In addition to this, RGB and RGBA tuples are
        accepted for P and PA images.

        Note that this method is relatively slow.  For more extensive changes,
        use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
        module instead.

        See:

        * :py:meth:`~PIL.Image.Image.paste`
        * :py:meth:`~PIL.Image.Image.putdata`
        * :py:mod:`~PIL.ImageDraw`

        :param xy: The pixel coordinate, given as (x, y). See
           :ref:`coordinate-system`.
        :param value: The pixel value.
        """
        # Copy-on-write before mutating shared pixel data.
        if self.readonly:
            self._copy()
        self.load()
        if self.pyaccess:
            # Use the Python pixel-access object when attached.
            return self.pyaccess.putpixel(xy, value)
        if (
            self.mode in ("P", "PA")
            and isinstance(value, (list, tuple))
            and len(value) in [3, 4]
        ):
            # RGB or RGBA value for a P or PA image
            if self.mode == "PA":
                alpha = value[3] if len(value) == 4 else 255
                value = value[:3]
            # Map the color to a palette index (allocating if needed).
            value = self.palette.getcolor(value, self)
            if self.mode == "PA":
                value = (value, alpha)
        return self.im.putpixel(xy, value)
    def remap_palette(self, dest_map, source_palette=None):
        """
        Rewrites the image to reorder the palette.

        The original image is untouched; a remapped copy is returned.

        :param dest_map: A list of indexes into the original palette.
           e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``
           is the identity transform.
        :param source_palette: Bytes or None.
        :returns:  An :py:class:`~PIL.Image.Image` object.

        """
        from . import ImagePalette
        if self.mode not in ("L", "P"):
            msg = "illegal image mode"
            raise ValueError(msg)
        bands = 3
        palette_mode = "RGB"
        if source_palette is None:
            if self.mode == "P":
                self.load()
                palette_mode = self.im.getpalettemode()
                if palette_mode == "RGBA":
                    bands = 4
                source_palette = self.im.getpalette(palette_mode, palette_mode)
            else:  # L-mode
                # Synthesize a greyscale identity palette.
                source_palette = bytearray(i // 3 for i in range(768))
        palette_bytes = b""
        new_positions = [0] * 256
        # pick only the used colors from the palette
        for i, oldPosition in enumerate(dest_map):
            palette_bytes += source_palette[
                oldPosition * bands : oldPosition * bands + bands
            ]
            new_positions[oldPosition] = i
        # replace the palette color id of all pixel with the new id
        # Palette images are [0..255], mapped through a 1 or 3
        # byte/color map.  We need to remap the whole image
        # from palette 1 to palette 2. New_positions is
        # an array of indexes into palette 1.  Palette 2 is
        # palette 1 with any holes removed.
        # We're going to leverage the convert mechanism to use the
        # C code to remap the image from palette 1 to palette 2,
        # by forcing the source image into 'L' mode and adding a
        # mapping 'L' mode palette, then converting back to 'L'
        # sans palette thus converting the image bytes, then
        # assigning the optimized RGB palette.
        # perf reference, 9500x4000 gif, w/~135 colors
        # 14 sec prepatch, 1 sec postpatch with optimization forced.
        mapping_palette = bytearray(new_positions)
        m_im = self.copy()
        m_im.mode = "P"
        m_im.palette = ImagePalette.ImagePalette(
            palette_mode, palette=mapping_palette * bands
        )
        # possibly set palette dirty, then
        # m_im.putpalette(mapping_palette, 'L')  # converts to 'P'
        # or just force it.
        # UNDONE -- this is part of the general issue with palettes
        m_im.im.putpalette(palette_mode + ";L", m_im.palette.tobytes())
        m_im = m_im.convert("L")
        m_im.putpalette(palette_bytes, palette_mode)
        m_im.palette = ImagePalette.ImagePalette(palette_mode, palette=palette_bytes)
        if "transparency" in self.info:
            # Remap the transparent index too; drop it if it was unused.
            try:
                m_im.info["transparency"] = dest_map.index(self.info["transparency"])
            except ValueError:
                if "transparency" in m_im.info:
                    del m_im.info["transparency"]
        return m_im
def _get_safe_box(self, size, resample, box):
"""Expands the box so it includes adjacent pixels
that may be used by resampling with the given resampling filter.
"""
filter_support = _filters_support[resample] - 0.5
scale_x = (box[2] - box[0]) / size[0]
scale_y = (box[3] - box[1]) / size[1]
support_x = filter_support * scale_x
support_y = filter_support * scale_y
return (
max(0, int(box[0] - support_x)),
max(0, int(box[1] - support_y)),
min(self.size[0], math.ceil(box[2] + support_x)),
min(self.size[1], math.ceil(box[3] + support_y)),
)
    def resize(self, size, resample=None, box=None, reducing_gap=None):
        """
        Returns a resized copy of this image.

        :param size: The requested size in pixels, as a 2-tuple:
           (width, height).
        :param resample: An optional resampling filter.  This can be
           one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
           :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
           :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
           If the image has mode "1" or "P", it is always set to
           :py:data:`Resampling.NEAREST`. If the image mode specifies a number
           of bits, such as "I;16", then the default filter is
           :py:data:`Resampling.NEAREST`. Otherwise, the default filter is
           :py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`.
        :param box: An optional 4-tuple of floats providing
           the source image region to be scaled.
           The values must be within (0, 0, width, height) rectangle.
           If omitted or None, the entire source is used.
        :param reducing_gap: Apply optimization by resizing the image
           in two steps. First, reducing the image by integer times
           using :py:meth:`~PIL.Image.Image.reduce`.
           Second, resizing using regular resampling. The last step
           changes size no less than by ``reducing_gap`` times.
           ``reducing_gap`` may be None (no first step is performed)
           or should be greater than 1.0. The bigger ``reducing_gap``,
           the closer the result to the fair resampling.
           The smaller ``reducing_gap``, the faster resizing.
           With ``reducing_gap`` greater or equal to 3.0, the result is
           indistinguishable from fair resampling in most cases.
           The default value is None (no optimization).
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        if resample is None:
            # Bit-depth modes like "I;16" default to NEAREST.
            type_special = ";" in self.mode
            resample = Resampling.NEAREST if type_special else Resampling.BICUBIC
        elif resample not in (
            Resampling.NEAREST,
            Resampling.BILINEAR,
            Resampling.BICUBIC,
            Resampling.LANCZOS,
            Resampling.BOX,
            Resampling.HAMMING,
        ):
            msg = f"Unknown resampling filter ({resample})."
            filters = [
                f"{filter[1]} ({filter[0]})"
                for filter in (
                    (Resampling.NEAREST, "Image.Resampling.NEAREST"),
                    (Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
                    (Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
                    (Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
                    (Resampling.BOX, "Image.Resampling.BOX"),
                    (Resampling.HAMMING, "Image.Resampling.HAMMING"),
                )
            ]
            msg += " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
            raise ValueError(msg)
        if reducing_gap is not None and reducing_gap < 1.0:
            msg = "reducing_gap must be 1.0 or greater"
            raise ValueError(msg)
        size = tuple(size)
        self.load()
        if box is None:
            box = (0, 0) + self.size
        else:
            box = tuple(box)
        # No-op resize: just return a copy.
        if self.size == size and box == (0, 0) + self.size:
            return self.copy()
        if self.mode in ("1", "P"):
            # Interpolating palette indices / bilevel pixels is meaningless.
            resample = Resampling.NEAREST
        if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
            # Resample with premultiplied alpha to avoid halo artifacts,
            # then convert back.
            im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
            im = im.resize(size, resample, box)
            return im.convert(self.mode)
        self.load()
        if reducing_gap is not None and resample != Resampling.NEAREST:
            # Two-step optimization: integer reduce first, then resample.
            factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
            factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
            if factor_x > 1 or factor_y > 1:
                reduce_box = self._get_safe_box(size, resample, box)
                factor = (factor_x, factor_y)
                # NOTE(review): self.reduce may have been replaced by a plugin
                # attribute; fall back to the unbound method when not callable.
                if callable(self.reduce):
                    self = self.reduce(factor, box=reduce_box)
                else:
                    self = Image.reduce(self, factor, box=reduce_box)
                # Translate the crop box into the reduced image's coordinates.
                box = (
                    (box[0] - reduce_box[0]) / factor_x,
                    (box[1] - reduce_box[1]) / factor_y,
                    (box[2] - reduce_box[0]) / factor_x,
                    (box[3] - reduce_box[1]) / factor_y,
                )
        return self._new(self.im.resize(size, resample, box))
def reduce(self, factor, box=None):
    """
    Returns a copy of the image reduced ``factor`` times.
    If the size of the image is not dividable by ``factor``,
    the resulting size will be rounded up.

    :param factor: A greater than 0 integer or tuple of two integers
       for width and height separately.
    :param box: An optional 4-tuple of ints providing
       the source image region to be reduced.
       The values must be within ``(0, 0, width, height)`` rectangle.
       If omitted or ``None``, the entire source is used.
    """
    if not isinstance(factor, (list, tuple)):
        factor = (factor, factor)

    box = (0, 0) + self.size if box is None else tuple(box)

    # Nothing to do: identity factor over the whole image.
    if factor == (1, 1) and box == (0, 0) + self.size:
        return self.copy()

    if self.mode in ["LA", "RGBA"]:
        # Reduce in a premultiplied-alpha mode so partially transparent
        # pixels average correctly, then convert back.
        premultiplied = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
        return premultiplied.reduce(factor, box).convert(self.mode)

    self.load()
    return self._new(self.im.reduce(factor, box))
def rotate(
    self,
    angle,
    resample=Resampling.NEAREST,
    expand=0,
    center=None,
    translate=None,
    fillcolor=None,
):
    """
    Returns a rotated copy of this image. This method returns a
    copy of this image, rotated the given number of degrees counter
    clockwise around its centre.

    :param angle: In degrees counter clockwise.
    :param resample: An optional resampling filter. This can be
       one of :py:data:`Resampling.NEAREST` (use nearest neighbour),
       :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
       environment), or :py:data:`Resampling.BICUBIC` (cubic spline
       interpolation in a 4x4 environment). If omitted, or if the image has
       mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
       See :ref:`concept-filters`.
    :param expand: Optional expansion flag. If true, expands the output
       image to make it large enough to hold the entire rotated image.
       If false or omitted, make the output image the same size as the
       input image. Note that the expand flag assumes rotation around
       the center and no translation.
    :param center: Optional center of rotation (a 2-tuple). Origin is
       the upper left corner. Default is the center of the image.
    :param translate: An optional post-rotate translation (a 2-tuple).
    :param fillcolor: An optional color for area outside the rotated image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """

    angle = angle % 360.0

    # Fast paths regardless of filter, as long as we're not
    # translating or changing the center.
    if not (center or translate):
        if angle == 0:
            return self.copy()
        if angle == 180:
            return self.transpose(Transpose.ROTATE_180)
        if angle in (90, 270) and (expand or self.width == self.height):
            return self.transpose(
                Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
            )

    # Calculate the affine matrix. Note that this is the reverse
    # transformation (from destination image to source) because we
    # want to interpolate the (discrete) destination pixel from
    # the local area around the (floating) source pixel.

    # The matrix we actually want (note that it operates from the right):
    # (1, 0, tx)   (1, 0, cx)   ( cos a, sin a, 0)   (1, 0, -cx)
    # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy)
    # (0, 0,  1)   (0, 0,  1)   (     0,     0, 1)   (0, 0,   1)

    # The reverse matrix is thus:
    # (1, 0, cx)   ( cos -a, sin -a, 0)   (1, 0, -cx)   (1, 0, -tx)
    # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty)
    # (0, 0,  1)   (      0,      0, 1)   (0, 0,   1)   (0, 0,   1)

    # In any case, the final translation may be updated at the end to
    # compensate for the expand flag.

    w, h = self.size

    if translate is None:
        post_trans = (0, 0)
    else:
        post_trans = translate
    if center is None:
        # FIXME These should be rounded to ints?
        rotn_center = (w / 2.0, h / 2.0)
    else:
        rotn_center = center

    angle = -math.radians(angle)
    # Rounding to 15 places removes tiny sin/cos residues (e.g. 6.1e-17)
    # so exact multiples of 90 degrees produce an exact matrix.
    matrix = [
        round(math.cos(angle), 15),
        round(math.sin(angle), 15),
        0.0,
        round(-math.sin(angle), 15),
        round(math.cos(angle), 15),
        0.0,
    ]

    def transform(x, y, matrix):
        # Apply the affine matrix (a, b, c, d, e, f) to the point (x, y).
        (a, b, c, d, e, f) = matrix
        return a * x + b * y + c, d * x + e * y + f

    # Fold the rotation-center shift and the post-rotate translation into
    # the matrix's translation terms.
    matrix[2], matrix[5] = transform(
        -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
    )
    matrix[2] += rotn_center[0]
    matrix[5] += rotn_center[1]

    if expand:
        # calculate output size: map the four corners and take the
        # bounding box of the results.
        xx = []
        yy = []
        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
            x, y = transform(x, y, matrix)
            xx.append(x)
            yy.append(y)
        nw = math.ceil(max(xx)) - math.floor(min(xx))
        nh = math.ceil(max(yy)) - math.floor(min(yy))

        # We multiply a translation matrix from the right.  Because of its
        # special form, this is the same as taking the image of the
        # translation vector as new translation vector.
        matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
        w, h = nw, nh

    return self.transform(
        (w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
    )
def save(self, fp, format=None, **params):
    """
    Saves this image under the given filename. If no format is
    specified, the format to use is determined from the filename
    extension, if possible.

    Keyword options can be used to provide additional instructions
    to the writer. If a writer doesn't recognise an option, it is
    silently ignored. The available options are described in the
    :doc:`image format documentation
    <../handbook/image-file-formats>` for each writer.

    You can use a file object instead of a filename. In this case,
    you must always specify the format. The file object must
    implement the ``seek``, ``tell``, and ``write``
    methods, and be opened in binary mode.

    :param fp: A filename (string), pathlib.Path object or file object.
    :param format: Optional format override. If omitted, the
       format to use is determined from the filename extension.
       If a file object was used instead of a filename, this
       parameter should always be used.
    :param params: Extra parameters to the image writer.
    :returns: None
    :exception ValueError: If the output format could not be determined
       from the file name. Use the format option to solve this.
    :exception OSError: If the file could not be written. The file
       may have been created, and may contain partial data.
    """

    filename = ""
    open_fp = False  # True when we open the file here and must close it.
    if isinstance(fp, Path):
        filename = str(fp)
        open_fp = True
    elif is_path(fp):
        filename = fp
        open_fp = True
    elif fp == sys.stdout:
        try:
            # Write binary image data to stdout's binary buffer when present.
            fp = sys.stdout.buffer
        except AttributeError:
            pass
    if not filename and hasattr(fp, "name") and is_path(fp.name):
        # only set the name for metadata purposes
        filename = fp.name

    # may mutate self!
    self._ensure_mutable()

    save_all = params.pop("save_all", False)
    self.encoderinfo = params
    self.encoderconfig = ()

    preinit()

    ext = os.path.splitext(filename)[1].lower()

    if not format:
        if ext not in EXTENSION:
            init()
        try:
            format = EXTENSION[ext]
        except KeyError as e:
            msg = f"unknown file extension: {ext}"
            raise ValueError(msg) from e

    if format.upper() not in SAVE:
        init()
    if save_all:
        save_handler = SAVE_ALL[format.upper()]
    else:
        save_handler = SAVE[format.upper()]

    created = False
    if open_fp:
        # Remember whether the file existed so a failed save can clean up.
        created = not os.path.exists(filename)
        if params.get("append", False):
            # Open also for reading ("+"), because TIFF save_all
            # writer needs to go back and edit the written data.
            fp = builtins.open(filename, "r+b")
        else:
            fp = builtins.open(filename, "w+b")

    try:
        save_handler(self, fp, filename)
    except Exception:
        if open_fp:
            fp.close()
        if created:
            # Don't leave behind a partially written file that we created.
            try:
                os.remove(filename)
            except PermissionError:
                pass
        raise
    if open_fp:
        fp.close()
def seek(self, frame):
    """
    Seeks to the given frame in this sequence file. If you seek
    beyond the end of the sequence, the method raises an
    ``EOFError`` exception. When a sequence file is opened, the
    library automatically seeks to frame 0.

    See :py:meth:`~PIL.Image.Image.tell`.

    If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
    number of available frames.

    :param frame: Frame number, starting at 0.
    :exception EOFError: If the call attempts to seek beyond the end
       of the sequence.
    """
    # Base-class behaviour: only frame 0 exists.  Multi-frame file
    # plugins override this method with a real implementation.
    if frame != 0:
        raise EOFError
def show(self, title=None):
    """
    Displays this image. This method is mainly intended for debugging purposes.

    This method calls :py:func:`PIL.ImageShow.show` internally. You can use
    :py:func:`PIL.ImageShow.register` to override its default behaviour.

    The image is first saved to a temporary file. By default, it will be in
    PNG format.

    On Unix, the image is then opened using the **display**, **eog** or
    **xv** utility, depending on which one can be found.

    On macOS, the image is opened with the native Preview application.

    On Windows, the image is opened with the standard PNG display utility.

    :param title: Optional title to use for the image window, where possible.
    """
    # Delegate to the module-level viewer dispatcher.
    _show(self, title=title)
def split(self):
    """
    Split this image into individual bands. This method returns a
    tuple of individual image bands from an image. For example,
    splitting an "RGB" image creates three new images each
    containing a copy of one of the original bands (red, green,
    blue).

    If you need only one band, :py:meth:`~PIL.Image.Image.getchannel`
    method can be more convenient and faster.

    :returns: A tuple containing bands.
    """
    self.load()
    # Single-band images have nothing to split; return a copy of the image.
    if self.im.bands == 1:
        return (self.copy(),)
    return tuple(map(self._new, self.im.split()))
def getchannel(self, channel):
    """
    Returns an image containing a single channel of the source image.

    :param channel: What channel to return. Could be index
      (0 for "R" channel of "RGB") or channel name
      ("A" for alpha channel of "RGBA").
    :returns: An image in "L" mode.

    .. versionadded:: 4.3.0
    """
    self.load()

    # A channel name is resolved to its index via the image's band list.
    if isinstance(channel, str):
        try:
            channel = self.getbands().index(channel)
        except ValueError as e:
            raise ValueError(f'The image has no channel "{channel}"') from e

    return self._new(self.im.getband(channel))
def tell(self):
    """
    Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.

    If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
    number of available frames.

    :returns: Frame number, starting with 0.
    """
    # Base-class behaviour: single-frame images are always at frame 0.
    return 0
def thumbnail(self, size, resample=Resampling.BICUBIC, reducing_gap=2.0):
    """
    Make this image into a thumbnail.  This method modifies the
    image to contain a thumbnail version of itself, no larger than
    the given size.  This method calculates an appropriate thumbnail
    size to preserve the aspect of the image, calls the
    :py:meth:`~PIL.Image.Image.draft` method to configure the file reader
    (where applicable), and finally resizes the image.

    Note that this function modifies the :py:class:`~PIL.Image.Image`
    object in place.  If you need to use the full resolution image as well,
    apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
    image.

    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param resample: Optional resampling filter.  This can be one
       of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
       :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
       :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
       If omitted, it defaults to :py:data:`Resampling.BICUBIC`.
       (was :py:data:`Resampling.NEAREST` prior to version 2.5.0).
       See: :ref:`concept-filters`.
    :param reducing_gap: Apply optimization by resizing the image
       in two steps. First, reducing the image by integer times
       using :py:meth:`~PIL.Image.Image.reduce` or
       :py:meth:`~PIL.Image.Image.draft` for JPEG images.
       Second, resizing using regular resampling. The last step
       changes size no less than by ``reducing_gap`` times.
       ``reducing_gap`` may be None (no first step is performed)
       or should be greater than 1.0. The bigger ``reducing_gap``,
       the closer the result to the fair resampling.
       The smaller ``reducing_gap``, the faster resizing.
       With ``reducing_gap`` greater or equal to 3.0, the result is
       indistinguishable from fair resampling in most cases.
       The default value is 2.0 (very close to fair resampling
       while still being faster in many cases).
    :returns: None
    """
    provided_size = tuple(map(math.floor, size))

    def preserve_aspect_ratio():
        # Shrink the requested size to match the image's aspect ratio.
        # Returns None when the image already fits inside the request.
        def round_aspect(number, key):
            return max(min(math.floor(number), math.ceil(number), key=key), 1)

        x, y = provided_size
        if x >= self.width and y >= self.height:
            return
        aspect = self.width / self.height
        if x / y >= aspect:
            x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
        else:
            y = round_aspect(
                x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
            )
        return x, y

    box = None
    if reducing_gap is not None:
        size = preserve_aspect_ratio()
        if size is None:
            return
        # Ask the file reader (e.g. JPEG) to pre-scale cheaply while keeping
        # at least ``reducing_gap`` headroom for the final quality resize.
        res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
        if res is not None:
            box = res[1]
    if box is None:
        self.load()

        # load() may have changed the size of the image
        size = preserve_aspect_ratio()
        if size is None:
            return

    if self.size != size:
        im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)

        # Adopt the resized core image in place.
        self.im = im.im
        self._size = size
        self.mode = self.im.mode

    self.readonly = 0
    self.pyaccess = None
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(
    self,
    size,
    method,
    data=None,
    resample=Resampling.NEAREST,
    fill=1,
    fillcolor=None,
):
    """
    Transforms this image.  This method creates a new image with the
    given size, and the same mode as the original, and copies data
    to the new image using the given transform.

    :param size: The output size in pixels, as a 2-tuple:
       (width, height).
    :param method: The transformation method.  This is one of
      :py:data:`Transform.EXTENT` (cut out a rectangular subregion),
      :py:data:`Transform.AFFINE` (affine transform),
      :py:data:`Transform.PERSPECTIVE` (perspective transform),
      :py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or
      :py:data:`Transform.MESH` (map a number of source quadrilaterals
      in one operation).

      It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
      object::

        class Example(Image.ImageTransformHandler):
            def transform(self, size, data, resample, fill=1):
                # Return result

      It may also be an object with a ``method.getdata`` method
      that returns a tuple supplying new ``method`` and ``data`` values::

        class Example:
            def getdata(self):
                method = Image.Transform.EXTENT
                data = (0, 0, 100, 100)
                return method, data
    :param data: Extra data to the transformation method.
    :param resample: Optional resampling filter.  It can be one of
       :py:data:`Resampling.NEAREST` (use nearest neighbour),
       :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
       environment), or :py:data:`Resampling.BICUBIC` (cubic spline
       interpolation in a 4x4 environment). If omitted, or if the image
       has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
       See: :ref:`concept-filters`.
    :param fill: If ``method`` is an
      :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
      the arguments passed to it. Otherwise, it is unused.
    :param fillcolor: Optional fill color for the area outside the
       transform in the output image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST:
        # Resample in a premultiplied-alpha mode to avoid halo artifacts
        # along transparent edges, then convert back.
        return (
            self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
            .transform(size, method, data, resample, fill, fillcolor)
            .convert(self.mode)
        )

    if isinstance(method, ImageTransformHandler):
        return method.transform(size, self, resample=resample, fill=fill)

    if hasattr(method, "getdata"):
        # compatibility w. old-style transform objects
        method, data = method.getdata()

    if data is None:
        msg = "missing method data"
        raise ValueError(msg)

    im = new(self.mode, size, fillcolor)
    if self.mode == "P" and self.palette:
        im.palette = self.palette.copy()
    im.info = self.info.copy()
    if method == Transform.MESH:
        # list of quads
        for box, quad in data:
            im.__transformer(
                box, self, Transform.QUAD, quad, resample, fillcolor is None
            )
    else:
        im.__transformer(
            (0, 0) + size, self, method, data, resample, fillcolor is None
        )

    return im
def __transformer(
    self, box, image, method, data, resample=Resampling.NEAREST, fill=1
):
    # Internal helper for transform(): normalize ``method``/``data`` into a
    # form the C layer understands, validate the resampling filter, then
    # fill region ``box`` of ``self`` with the transformed ``image``.
    w = box[2] - box[0]
    h = box[3] - box[1]

    if method == Transform.AFFINE:
        data = data[:6]

    elif method == Transform.EXTENT:
        # convert extent to an affine transform
        x0, y0, x1, y1 = data
        xs = (x1 - x0) / w
        ys = (y1 - y0) / h
        method = Transform.AFFINE
        data = (xs, 0, x0, 0, ys, y0)

    elif method == Transform.PERSPECTIVE:
        data = data[:8]

    elif method == Transform.QUAD:
        # quadrilateral warp.  data specifies the four corners
        # given as NW, SW, SE, and NE.
        nw = data[:2]
        sw = data[2:4]
        se = data[4:6]
        ne = data[6:8]
        x0, y0 = nw
        As = 1.0 / w
        At = 1.0 / h
        # Bilinear interpolation coefficients between the four corners.
        data = (
            x0,
            (ne[0] - x0) * As,
            (sw[0] - x0) * At,
            (se[0] - sw[0] - ne[0] + x0) * As * At,
            y0,
            (ne[1] - y0) * As,
            (sw[1] - y0) * At,
            (se[1] - sw[1] - ne[1] + y0) * As * At,
        )

    else:
        msg = "unknown transformation method"
        raise ValueError(msg)

    if resample not in (
        Resampling.NEAREST,
        Resampling.BILINEAR,
        Resampling.BICUBIC,
    ):
        # Build an error message listing the filters transform() supports.
        if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS):
            msg = {
                Resampling.BOX: "Image.Resampling.BOX",
                Resampling.HAMMING: "Image.Resampling.HAMMING",
                Resampling.LANCZOS: "Image.Resampling.LANCZOS",
            }[resample] + f" ({resample}) cannot be used."
        else:
            msg = f"Unknown resampling filter ({resample})."

        filters = [
            f"{filter[1]} ({filter[0]})"
            for filter in (
                (Resampling.NEAREST, "Image.Resampling.NEAREST"),
                (Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
                (Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
            )
        ]
        msg += " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
        raise ValueError(msg)

    image.load()

    self.load()

    if image.mode in ("1", "P"):
        # Palette / bilevel sources only support nearest-neighbour sampling.
        resample = Resampling.NEAREST

    self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
    """
    Transpose image (flip or rotate in 90 degree steps)

    :param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`,
      :py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`,
      :py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`,
      :py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`.
    :returns: Returns a flipped or rotated copy of this image.
    """
    self.load()
    # The actual flip/rotation happens in the C core; wrap its result.
    transposed = self.im.transpose(method)
    return self._new(transposed)
def effect_spread(self, distance):
    """
    Randomly spread pixels in an image.

    :param distance: Distance to spread pixels.
    """
    self.load()
    # Pixel shuffling is implemented in the C core.
    spread = self.im.effect_spread(distance)
    return self._new(spread)
def toqimage(self):
    """Returns a QImage copy of this image"""
    # Imported lazily so PIL itself does not require Qt.
    from . import ImageQt

    if not ImageQt.qt_is_installed:
        raise ImportError("Qt bindings are not installed")
    return ImageQt.toqimage(self)
def toqpixmap(self):
    """Returns a QPixmap copy of this image"""
    # Imported lazily so PIL itself does not require Qt.
    from . import ImageQt

    if not ImageQt.qt_is_installed:
        raise ImportError("Qt bindings are not installed")
    return ImageQt.toqpixmap(self)
def parse_args(arguments):
    """Parse word-cloud CLI arguments.

    Returns a ``(wordcloud_kwargs, text, imagefile)`` tuple ready to be fed
    to the WordCloud constructor and the image writer.
    """
    # prog = 'python wordcloud_cli.py'
    parser = make_parser()
    args = parser.parse_args(arguments)
    if args.background_color == 'None':
        args.background_color = None
    if args.colormask and args.color:
        raise ValueError('specify either a color mask or a color function')

    args = vars(args)

    # Consume the opened input streams; they are removed from the kwargs.
    with args.pop('text') as f:
        text = f.read()

    if args['stopwords']:
        with args.pop('stopwords') as f:
            args['stopwords'] = {line.strip() for line in f.readlines()}

    if args['mask']:
        args['mask'] = np.array(Image.open(args.pop('mask')))

    # Pick the color function: mask image > single color > random default.
    colormask = args.pop('colormask')
    color = args.pop('color')
    if colormask:
        color_func = wc.ImageColorGenerator(np.array(Image.open(colormask)))
    elif color:
        color_func = wc.get_single_color_func(color)
    else:
        color_func = wc.random_color_func
    args['color_func'] = color_func

    imagefile = args.pop('imagefile')
    return args, text, imagefile
171,393 | CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: SQL
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Database
"""
NAME = "adodbapi"
MAINTAINER = "Vernon Cole"
MAINTAINER_EMAIL = "vernondcole@gmail.com"
DESCRIPTION = (
"""A pure Python package implementing PEP 249 DB-API using Microsoft ADO."""
)
URL = "http://sourceforge.net/projects/adodbapi"
LICENSE = "LGPL"
CLASSIFIERS = filter(None, CLASSIFIERS.split("\n"))
AUTHOR = "Henrik Ekelund, Vernon Cole, et.al."
AUTHOR_EMAIL = "vernondcole@gmail.com"
PLATFORMS = ["Windows", "Linux"]
VERSION = None
class build_py(old_build_py):
    """distutils ``build_py`` variant that also builds the *.py modules
    generated by the ``build_src`` command."""

    def run(self):
        # Make sure packages that only contain build_src-generated modules
        # are still built, even when no packages were configured explicitly.
        build_src = self.get_finalized_command('build_src')
        if build_src.py_modules_dict and self.packages is None:
            self.packages = list(build_src.py_modules_dict.keys ())
        old_build_py.run(self)

    def find_package_modules(self, package, package_dir):
        modules = old_build_py.find_package_modules(self, package, package_dir)

        # Find build_src generated *.py files.
        build_src = self.get_finalized_command('build_src')
        modules += build_src.py_modules_dict.get(package, [])

        return modules

    def find_modules(self):
        # Temporarily hide the non-string (generated) entries from distutils'
        # find_modules, then restore the original module list afterwards.
        old_py_modules = self.py_modules[:]
        new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
        self.py_modules[:] = new_py_modules
        modules = old_build_py.find_modules(self)
        self.py_modules[:] = old_py_modules

        return modules

    # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple
    # and item[2] is source file.
def setup(**attr):
    # numpy.distutils' setup(): wraps distutils' setup() with numpy's command
    # classes and support for a lazily-evaluated ``configuration`` callable.
    cmdclass = numpy_cmdclass.copy()

    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass

    if 'configuration' in new_attr:
        # To avoid calling configuration if there are any errors
        # or help request in command in the line.
        configuration = new_attr.pop('configuration')

        # Dry-run recursion: temporarily tell distutils.core to stop after
        # command-line parsing, then restore its module-level state.
        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = "commandline"
        try:
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop
        if dist.help or not _command_line_ok():
            # probably displayed help, skip running any commands
            return dist

        # create setup dictionary and append to new_attr
        config = configuration()
        if hasattr(config, 'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)

    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules', []):
        new_libraries = []
        for item in ext.libraries:
            if is_sequence(item):
                lib_name, build_info = item
                _check_append_ext_library(libraries, lib_name, build_info)
                new_libraries.append(lib_name)
            elif is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError("invalid description of extension module "
                                "library %r" % (item,))
        ext.libraries = new_libraries
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)

    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
       and 'headers' not in new_attr:
        new_attr['headers'] = []

    # Use our custom NumpyDistribution class instead of distutils' one
    new_attr['distclass'] = NumpyDistribution

    return old_setup(**new_attr)
def setup_package():
    """Run distutils setup for the adodbapi package using module metadata."""
    from distutils.command.build_py import build_py
    from distutils.core import setup

    metadata = dict(
        cmdclass={"build_py": build_py},
        name=NAME,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        url=URL,
        keywords="database ado odbc dbapi db-api Microsoft SQL",
        ## download_url=DOWNLOAD_URL,
        long_description=open("README.txt").read(),
        license=LICENSE,
        classifiers=CLASSIFIERS,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        platforms=PLATFORMS,
        version=VERSION,
        package_dir={"adodbapi": ""},
        packages=["adodbapi"],
    )
    setup(**metadata)
    return
171,394 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def standardErrorHandler(connection, cursor, errorclass, errorvalue):
    """Default DB-API error handler: record the error, then raise it.

    The error tuple is appended to ``connection.messages`` and, when a
    cursor is supplied, to ``cursor.messages``.  Appending is best-effort:
    a broken connection/cursor must not prevent the real error from
    propagating.

    :param connection: connection whose ``messages`` list receives the error
    :param cursor: optional cursor; also receives the error when not None
    :param errorclass: exception class to raise
    :param errorvalue: value passed to the exception constructor
    :raises errorclass: always
    """
    err = (errorclass, errorvalue)
    try:
        connection.messages.append(err)
    except Exception:  # was a bare except: don't mask SystemExit/KeyboardInterrupt
        pass
    if cursor is not None:
        try:
            cursor.messages.append(err)
        except Exception:
            pass
    raise errorclass(errorvalue)
171,395 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
# Python 2 compatibility alias: all strings are unicode ``str`` in Python 3.
StringTypes = str

class DataError(DatabaseError):
    """Error due to problems with the processed data (DB-API 2.0)."""
    pass

# Static mapping from Python types to ADO data-type constants; consulted
# first by pyTypeToADOType() before any duck-typed probing.
typeMap = {
    memoryViewType: adc.adVarBinary,
    float: adc.adDouble,
    type(None): adc.adEmpty,
    str: adc.adBSTR,
    bool: adc.adBoolean,  # v2.1 Cole
    decimal.Decimal: adc.adDecimal,
    int: adc.adBigInt,
    bytes: adc.adVarBinary,
}
def pyTypeToADOType(d):
    """Return the ADO type constant used to bind the Python value ``d``."""
    kind = type(d)
    try:
        return typeMap[kind]
    except KeyError:  # The type was not defined in the pre-computed Type table
        from . import dateconverter

        # Maybe it is one of the date/time types the active converter handles.
        if kind in dateconverter.types:
            return adc.adDate
        # Otherwise probe the object itself to support duck typing.
        if isinstance(d, StringTypes):
            return adc.adBSTR
        if isinstance(d, numbers.Integral):
            return adc.adBigInt
        if isinstance(d, numbers.Real):
            return adc.adDouble
        raise DataError('cannot convert "%s" (type=%s) to ADO' % (repr(d), kind))
171,396 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def variantConvertDate(v):
    """Convert a COM date value to a Python date object using the
    currently configured date converter."""
    from . import dateconverter  # this function only called when adodbapi is running

    return dateconverter.DateObjectFromCOMDate(v)
171,397 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
onIronPython = sys.platform == "cli"
if onIronPython: # we need type definitions for odd data we may need to convert
# noinspection PyUnresolvedReferences
from System import DateTime, DBNull
NullTypes = (type(None), DBNull)
else:
DateTime = type(NotImplemented) # should never be seen on win32
NullTypes = type(None)
def cvtString(variant):  # use to get old action of adodbapi v1 if desired
    """Return ``variant`` as a str.

    On IronPython the .NET ``ToString()`` conversion is preferred when
    available; the fallback (and the CPython path) is plain ``str()``.
    """
    if onIronPython:
        try:
            return variant.ToString()
        except Exception:  # was a bare except: best-effort, fall back to str()
            pass
    return str(variant)
171,398 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def cvtDecimal(variant):  # better name
    """Convert an ADO variant to decimal.Decimal, tolerating ',' separators."""
    return _convertNumberWithCulture(variant, decimal.Decimal)

def cvtNumeric(variant):  # older name - don't break old code
    """Deprecated alias kept for backward compatibility; use cvtDecimal()."""
    return cvtDecimal(variant)
171,399 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def _convertNumberWithCulture(variant, f):
try:
return f(variant)
except (ValueError, TypeError, decimal.InvalidOperation):
try:
europeVsUS = str(variant).replace(",", ".")
return f(europeVsUS)
except (ValueError, TypeError, decimal.InvalidOperation):
pass
def cvtFloat(variant):
return _convertNumberWithCulture(variant, float) | null |
171,400 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def cvtInt(variant):
    """Coerce an ADO variant to a Python int."""
    return int(variant)
171,401 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def cvtLong(variant):  # only important in old versions where long and int differ
    """Coerce an ADO variant to int (historical alias from the long/int split)."""
    return int(variant)
171,402 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def cvtBuffer(variant):
    """Coerce binary ADO data to an immutable bytes object."""
    return bytes(variant)
171,403 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def cvtUnicode(variant):
    """Coerce an ADO variant to a unicode str."""
    return str(variant)
171,404 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def identity(x):
    """No-op converter: return the value unchanged."""
    return x
171,405 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
# Module-level debug switch; cvtUnusual() logs to stderr when verbose > 1.
verbose = False
def cvtUnusual(variant):
    """Last-resort converter for values with no registered conversion.

    .NET DateTime values (seen under IronPython) are converted through the
    active date converter; anything else is handed back unchanged.
    """
    if verbose > 1:
        sys.stderr.write("Conversion called for Unusual data=%s\n" % repr(variant))
    if isinstance(variant, DateTime):  # COMdate or System.Date
        from .adodbapi import (  # this will only be called when adodbapi is in use, and very rarely
            dateconverter,
        )

        return dateconverter.DateObjectFromCOMDate(variant)
    return variant  # cannot find conversion function -- just give the data to the user
171,406 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def convert_to_python(variant, func):
    """Convert a DB value to Python: NULLs become None, otherwise apply func."""
    # NullTypes covers None and, on IronPython, System.DBNull.
    return None if isinstance(variant, NullTypes) else func(variant)
171,407 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
def changeNamedToQmark(
    op,
):  # convert from 'named' paramstyle to ADO required '?'mark parameters
    """Translate ':name' (named paramstyle) placeholders into '?' (qmark).

    Returns ``(converted_sql, parameter_names_in_order)``.  Single-quoted
    literals pass through untouched, and a doubled apostrophe ('') inside a
    literal is preserved as an escaped quote.
    """
    out_sql = ""
    names = []
    # Splitting on "'" makes every odd-numbered piece a quoted literal.
    in_literal = False
    for piece in op.split("'"):
        if in_literal:  # this piece is inside a quote
            if piece == "":  # double apostrophe to quote one apostrophe
                out_sql = out_sql[:-1]  # so take one away
            else:
                out_sql += "'" + piece + "'"  # pass the quoted string as is
        else:  # SQL code -- scan for :namedParameter markers
            remainder = piece
            while remainder:
                before, colon, after = remainder.partition(":")
                out_sql += before
                remainder = after if colon else None
                if remainder:
                    # Collect the identifier characters after the colon.
                    idx = 0
                    while idx < len(remainder) and (
                        remainder[idx].isalnum() or remainder[idx] == "_"
                    ):
                        idx += 1
                    name = remainder[:idx]
                    remainder = remainder[idx:]
                    if name:
                        names.append(name)  # record parameters in order
                        out_sql += "?"  # and emit the qmark
        in_literal = not in_literal
    return out_sql, names
171,408 | import datetime
import decimal
import numbers
import sys
import time
from . import ado_consts as adc
class ProgrammingError(DatabaseError):
    """Error caused by the program, e.g. malformed SQL (DB-API 2.0).

    NOTE(review): the paramstyle converter below appears nested inside this
    exception class -- it takes no ``self`` and reads like a module-level
    function, so this nesting looks like an extraction/indentation artifact.
    Confirm against the upstream adodbapi sources before refactoring.
    """

    def changeFormatToQmark(
        op,
    ):  # convert from 'format' paramstyle to ADO required '?'mark parameters
        # Handles both '%s' (format) and '%(name)s' (pyformat) placeholders;
        # returns (converted_sql, parameter_names_in_order).
        outOp = ""
        outparams = []
        chunks = op.split(
            "'"
        )  # quote all literals -- odd numbered list results are literals.
        inQuotes = False
        for chunk in chunks:
            if inQuotes:
                if (
                    outOp != "" and chunk == ""
                ):  # he used a double apostrophe to quote one apostrophe
                    outOp = outOp[:-1]  # so take one away
                else:
                    outOp += "'" + chunk + "'"  # else pass the quoted string as is.
            else:  # is SQL code -- look for a %s parameter
                if "%(" in chunk:  # ugh! pyformat!
                    while chunk:  # some SQL string remains
                        sp = chunk.split("%(", 1)
                        outOp += sp[0]  # concat the part up to the %
                        if len(sp) > 1:
                            try:
                                s, chunk = sp[1].split(")s", 1)  # find the ')s'
                            except ValueError:
                                raise ProgrammingError(
                                    'Pyformat SQL has incorrect format near "%s"' % chunk
                                )
                            outparams.append(s)
                            outOp += "?"  # put in the Qmark
                        else:
                            chunk = None
                else:  # proper '%s' format
                    sp = chunk.split("%s")  # make each %s
                    outOp += "?".join(sp)  # into ?
            inQuotes = not inQuotes  # every other chunk is a quoted string
        return outOp, outparams
171,409 | from . import adodbapi
def names(connection_object):
    """Return the names of all tables visible on an open adodbapi connection."""
    schema = connection_object.adoConn.OpenSchema(20)  # constant = adSchemaTables
    tables = []
    # Walk the ADO recordset row by row until EOF.
    while not schema.EOF:
        tables.append(
            adodbapi.getIndexedValue(schema.Fields, "TABLE_NAME").Value
        )
        schema.MoveNext()
    del schema  # release the COM recordset promptly
    return tables
171,410 | import array
import datetime
import os
import sys
import time
import adodbapi
import adodbapi.apibase as api
import adodbapi.process_connect_string
from adodbapi.apibase import ProgrammingError
def Date(year, month, day):  # dateconverter.Date(year,month,day)
    """Construct a date value from year/month/day (DB-API 2.0 constructor).

    The body was missing in this copy (a bare ``def`` with no statements is a
    syntax error); restored to build a ``datetime.date``, consistent with the
    sibling ``Time``/``Timestamp`` constructors in this module.
    """
    return datetime.date(year, month, day)

def DateFromTicks(ticks):
    """Construct a date from a POSIX timestamp, interpreted as UTC."""
    return Date(*time.gmtime(ticks)[:3])
171,411 | import array
import datetime
import os
import sys
import time
import adodbapi
import adodbapi.apibase as api
import adodbapi.process_connect_string
from adodbapi.apibase import ProgrammingError
def Time(hour, minute, second):
    """Construct a time-of-day value (DB-API 2.0 constructor)."""
    return datetime.time(hour, minute, second)  # dateconverter.Time(hour,minute,second)

def TimeFromTicks(ticks):
    """Construct a time-of-day from a POSIX timestamp, interpreted as UTC."""
    hh, mm, ss = time.gmtime(ticks)[3:6]
    return Time(hh, mm, ss)
171,412 | import array
import datetime
import os
import sys
import time
import adodbapi
import adodbapi.apibase as api
import adodbapi.process_connect_string
from adodbapi.apibase import ProgrammingError
def Timestamp(year, month, day, hour, minute, second):
    """Construct a timestamp value (DB-API 2.0 constructor)."""
    return datetime.datetime(year, month, day, hour, minute, second)

def TimestampFromTicks(ticks):
    """Construct a timestamp from a POSIX timestamp, interpreted as UTC."""
    parts = time.gmtime(ticks)[:6]
    return Timestamp(*parts)
171,413 | import array
import datetime
import os
import sys
import time
try:
    import Pyro4
except ImportError:
    # Fix: the message said 'pip import', which is not a pip command.
    print('* * * Sorry, server operation requires Pyro4. Please "pip install" it.')
    sys.exit(11)  # sys.exit works even without the site module's exit() helper
import adodbapi
import adodbapi.apibase as api
import adodbapi.process_connect_string
from adodbapi.apibase import ProgrammingError
_BaseException = api._BaseException
sys.excepthook = Pyro4.util.excepthook
Pyro4.config.PREFER_IP_VERSION = 0  # let the OS pick IPv4 vs IPv6
Pyro4.config.COMMTIMEOUT = 40.0  # generous network timeout (seconds)
Pyro4.config.SERIALIZER = "pickle"  # NOTE: pickle over the wire -- trusted peers only
def fix_uri(uri, kwargs):
    """convert a generic pyro uri with '0.0.0.0' into the address we actually called"""
    text = uri.asString()
    # try the IPv6 wildcard first, then the IPv4 one
    for wildcard in ("[::0]", "0.0.0.0"):
        pieces = text.split(wildcard)
        if len(pieces) > 1:
            # splice in the host we actually dialed
            return kwargs["proxy_host"].join(pieces)
    return uri  # no wildcard present: hand back the original uri object
class Connection(object):
    """Client-side db-api Connection that forwards all real work over
    Pyro4 to a server-side proxy object held in ``self.proxy``.

    Attribute reads/writes for selected names are relayed to the host via
    ``__getattr__``/``__setattr__``; everything else raises ProgrammingError.
    """
    # include connection attributes required by api definition.
    Warning = api.Warning
    Error = api.Error
    InterfaceError = api.InterfaceError
    DataError = api.DataError
    DatabaseError = api.DatabaseError
    OperationalError = api.OperationalError
    IntegrityError = api.IntegrityError
    InternalError = api.InternalError
    NotSupportedError = api.NotSupportedError
    ProgrammingError = api.ProgrammingError
    # set up some class attributes
    paramstyle = api.paramstyle
    def dbapi(self):  # a proposed db-api version 3 extension.
        "Return a reference to the DBAPI module for this Connection."
        return api
    def __init__(self):
        # proxy is None until connect(); __setattr__ checks it for truthiness.
        self.proxy = None
        self.kwargs = {}
        self.errorhandler = None
        self.supportsTransactions = False
        self.paramstyle = api.paramstyle
        self.timeout = 30
        self.cursors = {}
    def connect(self, kwargs, connection_maker):
        """Attach to the host-side connection object and pull its settings.

        *connection_maker* is the already-built Pyro proxy for the host
        connection; its ``connect()`` must return True on success.
        """
        self.kwargs = kwargs
        if verbose:
            print('%s attempting: "%s"' % (version, repr(kwargs)))
        self.proxy = connection_maker
        ##try:
        ret = self.proxy.connect(kwargs)  # ask the server to hook us up
        ##except ImportError, e:  # Pyro is trying to import pywinTypes.comerrer
        ##    self._raiseConnectionError(api.DatabaseError, 'Proxy cannot connect using=%s' % repr(kwargs))
        if ret is not True:
            self._raiseConnectionError(
                api.OperationalError, "Proxy returns error message=%s" % repr(ret)
            )
        # mirror the host connection's negotiated settings locally
        self.supportsTransactions = self.getIndexedValue("supportsTransactions")
        self.paramstyle = self.getIndexedValue("paramstyle")
        self.timeout = self.getIndexedValue("timeout")
        if verbose:
            print("adodbapi.remote New connection at %X" % id(self))
    def _raiseConnectionError(self, errorclass, errorvalue):
        # Route errors through the user-installed errorhandler, if any.
        eh = self.errorhandler
        if eh is None:
            eh = api.standardErrorHandler
        eh(self, None, errorclass, errorvalue)
    def close(self):
        """Close the connection now (rather than whenever __del__ is called).
        The connection will be unusable from this point forward;
        an Error (or subclass) exception will be raised if any operation is attempted with the connection.
        The same applies to all cursor objects trying to use the connection.
        """
        for crsr in list(self.cursors.values())[
            :
        ]:  # copy the list, then close each one
            crsr.close()
        try:
            """close the underlying remote Connection object"""
            self.proxy.close()
            if verbose:
                print("adodbapi.remote Closed connection at %X" % id(self))
            object.__delattr__(
                self, "proxy"
            )  # future attempts to use closed cursor will be caught by __getattr__
        except Exception:
            pass
    def __del__(self):
        # best-effort close; never raise during interpreter teardown
        try:
            self.proxy.close()
        except:
            pass
    def commit(self):
        """Commit any pending transaction to the database.
        Note that if the database supports an auto-commit feature,
        this must be initially off. An interface method may be provided to turn it back on.
        Database modules that do not support transactions should implement this method with void functionality.
        """
        if not self.supportsTransactions:
            return
        # the proxy returns a falsy value on success, an error message otherwise
        result = self.proxy.commit()
        if result:
            self._raiseConnectionError(
                api.OperationalError, "Error during commit: %s" % result
            )
    def _rollback(self):
        """In case a database does provide transactions this method causes the database to roll back to
        the start of any pending transaction. Closing a connection without committing the changes first will
        cause an implicit rollback to be performed.
        """
        result = self.proxy.rollback()
        if result:
            self._raiseConnectionError(
                api.OperationalError, "Error during rollback: %s" % result
            )
    def __setattr__(self, name, value):
        # These three attributes are kept in sync on the host side too.
        if name in ("paramstyle", "timeout", "autocommit"):
            if self.proxy:
                self.proxy.send_attribute_to_host(name, value)
        object.__setattr__(self, name, value)  # store attribute locally (too)
    def __getattr__(self, item):
        if (
            item == "rollback"
        ):  # the rollback method only appears if the database supports transactions
            if self.supportsTransactions:
                return (
                    self._rollback
                )  # return the rollback method so the caller can execute it.
            else:
                raise self.ProgrammingError(
                    "this data provider does not support Rollback"
                )
        elif item in (
            "dbms_name",
            "dbms_version",
            "connection_string",
            "autocommit",
        ):  # 'messages' ):
            # these live on the host: fetch on demand
            return self.getIndexedValue(item)
        elif item == "proxy":
            # 'proxy' was deleted by close(); any further use is an error
            raise self.ProgrammingError("Attempting to use closed connection")
        else:
            raise self.ProgrammingError('No remote access for attribute="%s"' % item)
    def getIndexedValue(self, index):
        # Fetch a single named attribute from the host-side connection.
        r = self.proxy.get_attribute_for_remote(index)
        return r
    def cursor(self):
        "Return a new Cursor Object using the connection."
        myCursor = Cursor(self)
        return myCursor
    def _i_am_here(self, crsr):
        "message from a new cursor proclaiming its existence"
        self.cursors[crsr.id] = crsr
    def _i_am_closing(self, crsr):
        "message from a cursor giving connection a chance to clean up"
        try:
            del self.cursors[crsr.id]
        except:
            pass
    def __enter__(self):  # Connections are context managers
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type:
            self._rollback()  # automatic rollback on errors
        else:
            self.commit()
    def get_table_names(self):
        # Delegated entirely to the host connection.
        return self.proxy.get_table_names()
The provided code snippet includes necessary dependencies for implementing the `connect` function. Write a Python function `def connect(*args, **kwargs)` to solve the following problem:
Create and open a remote db-api database connection object
Here is the function:
def connect(*args, **kwargs):  # --> a remote db-api connection object
    """Create and open a remote db-api database connection object.

    Locates the Pyro4 proxy server via the ``proxy_host``/``proxy_port``
    keywords (falling back to the PROXY_HOST / PROXY_PORT environment
    variables, then to ``[::1]:9099``), asks it for an exclusive host-side
    connection, and wraps that in a local :class:`Connection`.

    Raises api.DatabaseError when any of the three stages repeatedly fails.
    """
    # process the argument list the programmer gave us
    kwargs = adodbapi.process_connect_string.process(args, kwargs)
    # the "proxy_xxx" keys tell us where to find the PyRO proxy server
    kwargs.setdefault(
        "pyro_connection", "PYRO:ado.connection@%(proxy_host)s:%(proxy_port)s"
    )
    if "proxy_port" not in kwargs:  # idiomatic "not in" (was: not "x" in kwargs)
        try:
            pport = os.environ["PROXY_PORT"]
        except KeyError:
            pport = 9099
        kwargs["proxy_port"] = pport
    if "proxy_host" not in kwargs or not kwargs["proxy_host"]:
        try:
            phost = os.environ["PROXY_HOST"]
        except KeyError:
            phost = "[::1]"  # '127.0.0.1'
        kwargs["proxy_host"] = phost
    ado_uri = kwargs["pyro_connection"] % kwargs
    # ask PyRO make us a remote connection object
    # NOTE(review): the three retry loops below share one budget of 3 tries;
    # retries consumed by an earlier stage are unavailable to later stages --
    # confirm that this is the intended behavior.
    # NOTE(review): Pyro4 normally exposes its errors as Pyro4.errors;
    # confirm that Pyro4.core.errors resolves in this environment.
    auto_retry = 3
    while auto_retry:
        try:
            dispatcher = Pyro4.Proxy(ado_uri)
            if "comm_timeout" in kwargs:
                dispatcher._pyroTimeout = float(kwargs["comm_timeout"])
            uri = dispatcher.make_connection()
            break
        except Pyro4.core.errors.PyroError:
            auto_retry -= 1
            if auto_retry:
                time.sleep(1)
            else:
                raise api.DatabaseError("Cannot create connection to=%s" % ado_uri)
    conn_uri = fix_uri(uri, kwargs)  # get a host connection from the proxy server
    while auto_retry:
        try:
            host_conn = Pyro4.Proxy(
                conn_uri
            )  # bring up an exclusive Pyro connection for my ADO connection
            break
        except Pyro4.core.errors.PyroError:
            auto_retry -= 1
            if auto_retry:
                time.sleep(1)
            else:
                raise api.DatabaseError(
                    "Cannot create ADO connection object using=%s" % conn_uri
                )
    if "comm_timeout" in kwargs:
        host_conn._pyroTimeout = float(kwargs["comm_timeout"])
    # make a local clone
    myConn = Connection()
    while auto_retry:
        try:
            myConn.connect(
                kwargs, host_conn
            )  # call my connect method -- hand him the host connection
            break
        except Pyro4.core.errors.PyroError:
            auto_retry -= 1
            if auto_retry:
                time.sleep(1)
            else:
                raise api.DatabaseError(
                    "Pyro error creating connection to/thru=%s" % repr(kwargs)
                )
        except _BaseException as e:
            raise api.DatabaseError(
                "Error creating remote connection to=%s, e=%s, %s"
                % (repr(kwargs), repr(e), sys.exc_info()[2])
            )
    return myConn
171,414 | import array
import datetime
import os
import sys
import time
import adodbapi
import adodbapi.apibase as api
import adodbapi.process_connect_string
from adodbapi.apibase import ProgrammingError
memoryViewType = memoryview
The provided code snippet includes necessary dependencies for implementing the `fixpickle` function. Write a Python function `def fixpickle(x)` to solve the following problem:
pickle barfs on buffer(x) so we pass as array.array(x) then restore to original form for .execute()
Here is the function:
def fixpickle(x):
    """pickle barfs on buffer(x) so we pass as array.array(x) then restore to original form for .execute()"""
    if x is None:
        return None
    if isinstance(x, dict):
        # for 'named' paramstyle user will pass a mapping
        newargs = {}
        for arg, val in x.items():
            if isinstance(val, memoryview):
                # array.array.fromstring() was removed in Python 3.9;
                # frombytes() accepts any bytes-like object, incl. memoryview.
                newval = array.array("B")
                newval.frombytes(val)
                newargs[arg] = newval
            else:
                newargs[arg] = val
        return newargs
    # if not a mapping, then a sequence
    newargs = []
    for arg in x:
        if isinstance(arg, memoryview):
            newarg = array.array("B")
            newarg.frombytes(arg)
            newargs.append(newarg)
        else:
            newargs.append(arg)
    return newargs
171,415 | import copy
import decimal
import os
import sys
import weakref
from . import ado_consts as adc, apibase as api, process_connect_string
# Platform detection: pick a Dispatch() implementation for creating COM
# objects -- IronPython/.NET when api.onIronPython, else pywin32 on Windows.
onWin32 = False
if api.onIronPython:
    from clr import Reference
    from System import (
        Activator,
        Array,
        Byte,
        DateTime,
        DBNull,
        Decimal as SystemDecimal,
        Type,
    )
    def Dispatch(dispatch):
        # Create a COM object from its ProgID via .NET reflection.
        type = Type.GetTypeFromProgID(dispatch)
        return Activator.CreateInstance(type)
else:  # try pywin32
    try:
        import pythoncom
        import pywintypes
        import win32com.client
        onWin32 = True
        def Dispatch(dispatch):
            # Create a COM object from its ProgID via pywin32.
            return win32com.client.Dispatch(dispatch)
    except ImportError:
        import warnings
        warnings.warn(
            "pywin32 package (or IronPython) required for adodbapi.", ImportWarning
        )
from collections.abc import Mapping
def make_COM_connecter():
    """Create (but do not open) a raw ADODB.Connection COM object.

    Raises api.InterfaceError, chained to the underlying COM error,
    when the Dispatch call fails.
    """
    try:
        if onWin32:
            pythoncom.CoInitialize()  # v2.1 Paj
        c = Dispatch("ADODB.Connection")  # connect _after_ CoInitialize v2.1.1 adamvan
    except Exception as e:  # was a bare except: -- keep the wrap, but chain the cause
        raise api.InterfaceError(
            "Windows COM Error: Dispatch('ADODB.Connection') failed."
        ) from e
    return c
171,416 | import copy
import decimal
import os
import sys
import weakref
from . import ado_consts as adc, apibase as api, process_connect_string
if api.onIronPython:
from clr import Reference
from System import (
Activator,
Array,
Byte,
DateTime,
DBNull,
Decimal as SystemDecimal,
Type,
)
else: # try pywin32
try:
import pythoncom
import pywintypes
import win32com.client
onWin32 = True
except ImportError:
import warnings
warnings.warn(
"pywin32 package (or IronPython) required for adodbapi.", ImportWarning
)
from collections.abc import Mapping
class Connection(object):
    """db-api Connection wrapping a live ADODB.Connection COM object
    (``self.connector``).

    Transactions are managed with BeginTrans/CommitTrans/RollbackTrans when
    the provider reports "Transaction DDL" support; otherwise the connection
    behaves as autocommit.
    """
    # include connection attributes as class attributes required by api definition.
    Warning = api.Warning
    Error = api.Error
    InterfaceError = api.InterfaceError
    DataError = api.DataError
    DatabaseError = api.DatabaseError
    OperationalError = api.OperationalError
    IntegrityError = api.IntegrityError
    InternalError = api.InternalError
    NotSupportedError = api.NotSupportedError
    ProgrammingError = api.ProgrammingError
    FetchFailedError = api.FetchFailedError  # (special for django)
    # ...class attributes... (can be overridden by instance attributes)
    verbose = api.verbose
    def dbapi(self):  # a proposed db-api version 3 extension.
        "Return a reference to the DBAPI module for this Connection."
        return api
    def __init__(self):  # now define the instance attributes
        self.connector = None
        self.paramstyle = api.paramstyle
        self.supportsTransactions = False
        self.connection_string = ""
        # weak refs: cursors disappear from here once the user drops them
        self.cursors = weakref.WeakValueDictionary()
        self.dbms_name = ""
        self.dbms_version = ""
        self.errorhandler = None  # use the standard error handler for this instance
        self.transaction_level = 0  # 0 == Not in a transaction, at the top level
        self._autocommit = False
    def connect(self, kwargs, connection_maker=make_COM_connecter):
        """Open the ADO connection described by kwargs['connection_string'].

        The connection string is first %-interpolated with the full kwargs
        mapping, then handed to the COM object built by *connection_maker*.
        """
        if verbose > 9:
            print("kwargs=", repr(kwargs))
        try:
            self.connection_string = (
                kwargs["connection_string"] % kwargs
            )  # insert keyword arguments
        except Exception as e:
            # NOTE(review): 'e' is unused and the message ends abruptly ("->");
            # the original exception detail is lost here.
            self._raiseConnectionError(
                KeyError, "Python string format error in connection string->"
            )
        self.timeout = kwargs.get("timeout", 30)
        self.mode = kwargs.get("mode", adc.adModeUnknown)
        self.kwargs = kwargs
        if verbose:
            print('%s attempting: "%s"' % (version, self.connection_string))
        self.connector = connection_maker()
        self.connector.ConnectionTimeout = self.timeout
        self.connector.ConnectionString = self.connection_string
        self.connector.Mode = self.mode
        try:
            self.connector.Open()  # Open the ADO connection
        except api.Error:
            self._raiseConnectionError(
                api.DatabaseError,
                "ADO error trying to Open=%s" % self.connection_string,
            )
        try:  # Stefan Fuchs; support WINCCOLEDBProvider
            if getIndexedValue(self.connector.Properties, "Transaction DDL").Value != 0:
                self.supportsTransactions = True
        except pywintypes.com_error:
            pass  # Stefan Fuchs
        self.dbms_name = getIndexedValue(self.connector.Properties, "DBMS Name").Value
        try:  # Stefan Fuchs
            self.dbms_version = getIndexedValue(
                self.connector.Properties, "DBMS Version"
            ).Value
        except pywintypes.com_error:
            pass  # Stefan Fuchs
        self.connector.CursorLocation = defaultCursorLocation  # v2.1 Rose
        if self.supportsTransactions:
            self.connector.IsolationLevel = defaultIsolationLevel
            self._autocommit = bool(kwargs.get("autocommit", False))
            if not self._autocommit:
                self.transaction_level = (
                    self.connector.BeginTrans()
                )  # Disables autocommit & inits transaction_level
        else:
            self._autocommit = True
        if "paramstyle" in kwargs:
            self.paramstyle = kwargs["paramstyle"]  # let setattr do the error checking
        self.messages = []
        if verbose:
            print("adodbapi New connection at %X" % id(self))
    def _raiseConnectionError(self, errorclass, errorvalue):
        # Route errors through the user-installed errorhandler, if any.
        eh = self.errorhandler
        if eh is None:
            eh = api.standardErrorHandler
        eh(self, None, errorclass, errorvalue)
    def _closeAdoConnection(self):  # all v2.1 Rose
        """close the underlying ADO Connection object,
        rolling it back first if it supports transactions."""
        if self.connector is None:
            return
        if not self._autocommit:
            if self.transaction_level:
                try:
                    self.connector.RollbackTrans()
                except:
                    pass
        self.connector.Close()
        if verbose:
            print("adodbapi Closed connection at %X" % id(self))
    def close(self):
        """Close the connection now (rather than whenever __del__ is called).
        The connection will be unusable from this point forward;
        an Error (or subclass) exception will be raised if any operation is attempted with the connection.
        The same applies to all cursor objects trying to use the connection.
        """
        for crsr in list(self.cursors.values())[
            :
        ]:  # copy the list, then close each one
            crsr.close(dont_tell_me=True)  # close without back-link clearing
        self.messages = []
        try:
            self._closeAdoConnection()  # v2.1 Rose
        except Exception as e:
            # NOTE(review): 'e' is unused; the error is re-reported via exc_info.
            self._raiseConnectionError(sys.exc_info()[0], sys.exc_info()[1])
        self.connector = None  # v2.4.2.2 fix subtle timeout bug
        # per M.Hammond: "I expect the benefits of uninitializing are probably fairly small,
        # so never uninitializing will probably not cause any problems."
    def commit(self):
        """Commit any pending transaction to the database.
        Note that if the database supports an auto-commit feature,
        this must be initially off. An interface method may be provided to turn it back on.
        Database modules that do not support transactions should implement this method with void functionality.
        """
        self.messages = []
        if not self.supportsTransactions:
            return
        try:
            self.transaction_level = self.connector.CommitTrans()
            if verbose > 1:
                print("commit done on connection at %X" % id(self))
            if not (
                self._autocommit
                or (self.connector.Attributes & adc.adXactAbortRetaining)
            ):
                # If attributes has adXactCommitRetaining it performs retaining commits that is,
                # calling CommitTrans automatically starts a new transaction. Not all providers support this.
                # If not, we will have to start a new transaction by this command:
                self.transaction_level = self.connector.BeginTrans()
        except Exception as e:
            self._raiseConnectionError(api.ProgrammingError, e)
    def _rollback(self):
        """In case a database does provide transactions this method causes the database to roll back to
        the start of any pending transaction. Closing a connection without committing the changes first will
        cause an implicit rollback to be performed.
        If the database does not support the functionality required by the method, the interface should
        throw an exception in case the method is used.
        The preferred approach is to not implement the method and thus have Python generate
        an AttributeError in case the method is requested. This allows the programmer to check for database
        capabilities using the standard hasattr() function.
        For some dynamically configured interfaces it may not be appropriate to require dynamically making
        the method available. These interfaces should then raise a NotSupportedError to indicate the
        non-ability to perform the roll back when the method is invoked.
        """
        self.messages = []
        if (
            self.transaction_level
        ):  # trying to roll back with no open transaction causes an error
            try:
                self.transaction_level = self.connector.RollbackTrans()
                if verbose > 1:
                    print("rollback done on connection at %X" % id(self))
                if not self._autocommit and not (
                    self.connector.Attributes & adc.adXactAbortRetaining
                ):
                    # If attributes has adXactAbortRetaining it performs retaining aborts that is,
                    # calling RollbackTrans automatically starts a new transaction. Not all providers support this.
                    # If not, we will have to start a new transaction by this command:
                    if (
                        not self.transaction_level
                    ):  # if self.transaction_level == 0 or self.transaction_level is None:
                        self.transaction_level = self.connector.BeginTrans()
            except Exception as e:
                self._raiseConnectionError(api.ProgrammingError, e)
    def __setattr__(self, name, value):
        if name == "autocommit":  # extension: allow user to turn autocommit on or off
            if self.supportsTransactions:
                object.__setattr__(self, "_autocommit", bool(value))
                try:
                    self._rollback()  # must clear any outstanding transactions
                except:
                    pass
            return
        elif name == "paramstyle":
            if value not in api.accepted_paramstyles:
                self._raiseConnectionError(
                    api.NotSupportedError,
                    'paramstyle="%s" not in:%s'
                    % (value, repr(api.accepted_paramstyles)),
                )
        elif name == "variantConversions":
            value = copy.copy(
                value
            )  # make a new copy -- no changes in the default, please
        object.__setattr__(self, name, value)
    def __getattr__(self, item):
        if (
            item == "rollback"
        ):  # the rollback method only appears if the database supports transactions
            if self.supportsTransactions:
                return (
                    self._rollback
                )  # return the rollback method so the caller can execute it.
            else:
                raise AttributeError("this data provider does not support Rollback")
        elif item == "autocommit":
            return self._autocommit
        else:
            raise AttributeError(
                'no such attribute in ADO connection object as="%s"' % item
            )
    def cursor(self):
        "Return a new Cursor Object using the connection."
        self.messages = []
        c = Cursor(self)
        return c
    def _i_am_here(self, crsr):
        "message from a new cursor proclaiming its existence"
        oid = id(crsr)
        self.cursors[oid] = crsr
    def _i_am_closing(self, crsr):
        "message from a cursor giving connection a chance to clean up"
        try:
            del self.cursors[id(crsr)]
        except:
            pass
    def printADOerrors(self):
        # Diagnostic helper: dump the ADO Errors collection to stdout.
        j = self.connector.Errors.Count
        if j:
            print("ADO Errors:(%i)" % j)
            for e in self.connector.Errors:
                print("Description: %s" % e.Description)
                print("Error: %s %s " % (e.Number, adc.adoErrors.get(e.Number, "unknown")))
                if e.Number == adc.ado_error_TIMEOUT:
                    print(
                        "Timeout Error: Try using adodbpi.connect(constr,timeout=Nseconds)"
                    )
                print("Source: %s" % e.Source)
                print("NativeError: %s" % e.NativeError)
                print("SQL State: %s" % e.SQLState)
    def _suggest_error_class(self):
        """Introspect the current ADO Errors and determine an appropriate error class.
        Error.SQLState is a SQL-defined error condition, per the SQL specification:
        http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt
        The 23000 class of errors are integrity errors.
        Error 40002 is a transactional integrity error.
        """
        if self.connector is not None:
            for e in self.connector.Errors:
                state = str(e.SQLState)
                if state.startswith("23") or state == "40002":
                    return api.IntegrityError
        return api.DatabaseError
    def __del__(self):
        # best-effort close; never raise during interpreter teardown
        try:
            self._closeAdoConnection()  # v2.1 Rose
        except:
            pass
        self.connector = None
    def __enter__(self):  # Connections are context managers
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type:
            self._rollback()  # automatic rollback on errors
        else:
            self.commit()
    def get_table_names(self):
        # adSchemaTables == 20: enumerate table names from the schema rowset.
        schema = self.connector.OpenSchema(20)  # constant = adSchemaTables
        tables = []
        while not schema.EOF:
            name = getIndexedValue(schema.Fields, "TABLE_NAME").Value
            tables.append(name)
            schema.MoveNext()
        del schema
        return tables
The provided code snippet includes necessary dependencies for implementing the `connect` function. Write a Python function `def connect(*args, **kwargs)` to solve the following problem:
Connect to a database. call using: :connection_string -- An ADODB formatted connection string, see: * http://www.connectionstrings.com * http://www.asp101.com/articles/john/connstring/default.asp :timeout -- A command timeout value, in seconds (default 30 seconds)
Here is the function:
def connect(*args, **kwargs):  # --> a db-api connection object
    """Connect to a database.
    call using:
    :connection_string -- An ADODB formatted connection string, see:
    * http://www.connectionstrings.com
    * http://www.asp101.com/articles/john/connstring/default.asp
    :timeout -- A command timeout value, in seconds (default 30 seconds)
    """
    co = Connection()  # make an empty connection object
    kwargs = process_connect_string.process(args, kwargs, True)
    try:  # connect to the database, using the connection information in kwargs
        co.connect(kwargs)
        return co
    except Exception as e:
        message = 'Error opening connection to "%s"' % co.connection_string
        # chain the original exception so the root cause is preserved (PEP 3134)
        raise api.OperationalError(e, message) from e
171,417 | import copy
import decimal
import os
import sys
import weakref
from . import ado_consts as adc, apibase as api, process_connect_string
from collections.abc import Mapping
The provided code snippet includes necessary dependencies for implementing the `format_parameters` function. Write a Python function `def format_parameters(ADOparameters, show_value=False)` to solve the following problem:
Format a collection of ADO Command Parameters. Used by error reporting in _execute_command.
Here is the function:
def format_parameters(ADOparameters, show_value=False):
    """Format a collection of ADO Command Parameters.
    Used by error reporting in _execute_command.

    Returns a bracketed, newline-joined description of each parameter;
    falls back to "[]" if the collection cannot be inspected.
    """
    try:
        if show_value:
            desc = [
                'Name: %s, Dir.: %s, Type: %s, Size: %s, Value: "%s", Precision: %s, NumericScale: %s'
                % (
                    p.Name,
                    adc.directions[p.Direction],
                    adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"),
                    p.Size,
                    p.Value,
                    p.Precision,
                    p.NumericScale,
                )
                for p in ADOparameters
            ]
        else:
            desc = [
                "Name: %s, Dir.: %s, Type: %s, Size: %s, Precision: %s, NumericScale: %s"
                % (
                    p.Name,
                    adc.directions[p.Direction],
                    adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"),
                    p.Size,
                    p.Precision,
                    p.NumericScale,
                )
                for p in ADOparameters
            ]
        return "[" + "\n".join(desc) + "]"
    except Exception:  # was a bare except: -- don't trap SystemExit/KeyboardInterrupt
        return "[]"
171,418 | import copy
import decimal
import os
import sys
import weakref
from . import ado_consts as adc, apibase as api, process_connect_string
if api.onIronPython:
from clr import Reference
from System import (
Activator,
Array,
Byte,
DateTime,
DBNull,
Decimal as SystemDecimal,
Type,
)
else: # try pywin32
try:
import pythoncom
import pywintypes
import win32com.client
onWin32 = True
except ImportError:
import warnings
warnings.warn(
"pywin32 package (or IronPython) required for adodbapi.", ImportWarning
)
from collections.abc import Mapping
longType = int
StringTypes = str
dateconverter = api.pythonDateTimeConverter()
The provided code snippet includes necessary dependencies for implementing the `_configure_parameter` function. Write a Python function `def _configure_parameter(p, value, adotype, settings_known)` to solve the following problem:
Configure the given ADO Parameter 'p' with the Python 'value'.
Here is the function:
def _configure_parameter(p, value, adotype, settings_known):
    """Configure the given ADO Parameter 'p' with the Python 'value'.

    *adotype* is the ADO type code already assigned to the parameter;
    *settings_known* indicates whether the provider reported reliable
    size/type metadata (and thus whether string values may be truncated
    to p.Size and dates may be sent as COM dates).
    """
    if adotype in api.adoBinaryTypes:
        # binary data is streamed into the parameter in one chunk
        p.Size = len(value)
        p.AppendChunk(value)
    elif isinstance(value, StringTypes):  # v2.1 Jevon
        L = len(value)
        if adotype in api.adoStringTypes:  # v2.2.1 Cole
            if settings_known:
                L = min(L, p.Size)  # v2.1 Cole limit data to defined size
            p.Value = value[:L]  # v2.1 Jevon & v2.1 Cole
        else:
            p.Value = value  # dont limit if db column is numeric
        if L > 0:  # v2.1 Cole something does not like p.Size as Zero
            p.Size = L  # v2.1 Jevon
    elif isinstance(value, decimal.Decimal):
        if api.onIronPython:
            # IronPython: pass decimals as strings
            s = str(value)
            p.Value = s
            p.Size = len(s)
        else:
            p.Value = value
            # derive ADO Precision/NumericScale from the Decimal's own tuple
            exponent = value.as_tuple()[2]
            digit_count = len(value.as_tuple()[1])
            p.Precision = digit_count
            if exponent == 0:
                p.NumericScale = 0
            elif exponent < 0:
                p.NumericScale = -exponent
                if p.Precision < p.NumericScale:
                    p.Precision = p.NumericScale
            else:  # exponent > 0:
                p.NumericScale = 0
                p.Precision = digit_count + exponent
    elif type(value) in dateconverter.types:
        if settings_known and adotype in api.adoDateTimeTypes:
            p.Value = dateconverter.COMDate(value)
        else:  # probably a string
            # provide the date as a string in the format 'YYYY-MM-dd'
            s = dateconverter.DateObjectToIsoFormatString(value)
            p.Value = s
            p.Size = len(s)
    elif api.onIronPython and isinstance(value, longType):  # Iron Python Long
        s = str(value)  # feature workaround for IPy 2.0
        p.Value = s
    elif adotype == adc.adEmpty:  # ADO will not let you specify a null column
        p.Type = (
            adc.adInteger
        )  # so we will fake it to be an integer (just to have something)
        p.Value = None  # and pass in a Null *value*
    # For any other type, set the value and let pythoncom do the right thing.
    else:
        p.Value = value
# ADO parameter direction codes -> human-readable names.
directions = {
    0: "Unknown",
    1: "Input",
    2: "Output",
    3: "InputOutput",
    4: "Return",
}
def ado_direction_name(ado_dir):
    """Return the ADO constant name ("adParamInput", ...) for a direction code.

    Unknown or unhashable codes yield a descriptive fallback string.
    """
    try:
        return "adParam" + directions[ado_dir]
    except Exception:  # was a bare except: -- don't trap SystemExit/KeyboardInterrupt
        return "unknown direction (" + str(ado_dir) + ")"
# Lookup table: ADO type code constant -> its symbolic name.
# The ad* constants are module-level ADO constants defined earlier in this module.
adTypeNames = {
    adBSTR: "adBSTR",
    adBigInt: "adBigInt",
    adBinary: "adBinary",
    adBoolean: "adBoolean",
    adChapter: "adChapter",
    adChar: "adChar",
    adCurrency: "adCurrency",
    adDBDate: "adDBDate",
    adDBTime: "adDBTime",
    adDBTimeStamp: "adDBTimeStamp",
    adDate: "adDate",
    adDecimal: "adDecimal",
    adDouble: "adDouble",
    adEmpty: "adEmpty",
    adError: "adError",
    adFileTime: "adFileTime",
    adGUID: "adGUID",
    adIDispatch: "adIDispatch",
    adIUnknown: "adIUnknown",
    adInteger: "adInteger",
    adLongVarBinary: "adLongVarBinary",
    adLongVarChar: "adLongVarChar",
    adLongVarWChar: "adLongVarWChar",
    adNumeric: "adNumeric",
    adPropVariant: "adPropVariant",
    adSingle: "adSingle",
    adSmallInt: "adSmallInt",
    adTinyInt: "adTinyInt",
    adUnsignedBigInt: "adUnsignedBigInt",
    adUnsignedInt: "adUnsignedInt",
    adUnsignedSmallInt: "adUnsignedSmallInt",
    adUnsignedTinyInt: "adUnsignedTinyInt",
    adUserDefined: "adUserDefined",
    adVarBinary: "adVarBinary",
    adVarChar: "adVarChar",
    adVarNumeric: "adVarNumeric",
    adVarWChar: "adVarWChar",
    adVariant: "adVariant",
    adWChar: "adWChar",
}
def ado_type_name(ado_type):
    """Map an ADO type code to its constant name, with a readable fallback."""
    fallback = "unknown type (" + str(ado_type) + ")"
    return adTypeNames.get(ado_type, fallback)
171,421 | from __future__ import absolute_import
import re
def prepare_child(next, token):
    """Build a selector for the child axis: yield children matching the tag."""
    tag = token[1]
    def select(result):
        for node in result:
            yield from node.iterchildren(tag)
    return select
171,422 | from __future__ import absolute_import
import re
def prepare_star(next, token):
    """Build a selector for '*': yield every child of each element."""
    def select(result):
        for node in result:
            yield from node.iterchildren('*')
    return select
171,423 | from __future__ import absolute_import
import re
def prepare_self(next, token):
    """Build a selector for '.': return the current node-set unchanged."""
    return lambda result: result
171,424 | from __future__ import absolute_import
import re
def prepare_descendant(next, token):
    """Build a selector for '//tag': yield all matching descendants.

    Consumes the next token to learn the descendant tag; '*' matches all.
    """
    token = next()
    if token[0] == "*":
        tag = "*"
    elif token[0]:
        raise SyntaxError("invalid descendant")
    else:
        tag = token[1]
    def select(result):
        for node in result:
            yield from node.iterdescendants(tag)
    return select
171,425 | from __future__ import absolute_import
import re
def prepare_parent(next, token):
    """Build a selector for '..': yield each element's parent, skipping roots."""
    def select(result):
        for node in result:
            up = node.getparent()
            if up is not None:
                yield up
    return select
171,426 | from __future__ import absolute_import
import re
def prepare_predicate(next, token):
    """Build a selector for a '[...]' predicate.

    Tokens are consumed up to the closing ']' and folded into a small
    *signature* string ('@' for attribute marker, "'" for quoted strings,
    '-' for everything else) which is then matched against the handful of
    predicate forms supported here.
    """
    # FIXME: replace with real parser!!! refs:
    # http://effbot.org/zone/simple-iterator-parser.htm
    # http://javascript.crockford.com/tdop/tdop.html
    signature = ''
    predicate = []
    while 1:
        token = next()
        if token[0] == "]":
            break
        if token == ('', ''):
            # ignore whitespace
            continue
        if token[0] and token[0][:1] in "'\"":
            # strip the surrounding quotes; record the token as a string
            token = "'", token[0][1:-1]
        signature += token[0] or "-"
        predicate.append(token[1])
    # use signature to determine predicate type
    if signature == "@-":
        # [@attribute] predicate
        key = predicate[1]
        def select(result):
            for elem in result:
                if elem.get(key) is not None:
                    yield elem
        return select
    if signature == "@-='":
        # [@attribute='value']
        key = predicate[1]
        value = predicate[-1]
        def select(result):
            for elem in result:
                if elem.get(key) == value:
                    yield elem
        return select
    if signature == "-" and not re.match(r"-?\d+$", predicate[0]):
        # [tag]
        tag = predicate[0]
        def select(result):
            for elem in result:
                # keep elem if it has at least one child with this tag
                for _ in elem.iterchildren(tag):
                    yield elem
                    break
        return select
    if signature == ".='" or (signature == "-='" and not re.match(r"-?\d+$", predicate[0])):
        # [.='value'] or [tag='value']
        tag = predicate[0]
        value = predicate[-1]
        if tag:
            def select(result):
                for elem in result:
                    # keep elem if any matching child's full text equals value
                    for e in elem.iterchildren(tag):
                        if "".join(e.itertext()) == value:
                            yield elem
                            break
        else:
            def select(result):
                for elem in result:
                    if "".join(elem.itertext()) == value:
                        yield elem
        return select
    if signature == "-" or signature == "-()" or signature == "-()-":
        # [index] or [last()] or [last()-index]
        if signature == "-":
            # [index]
            index = int(predicate[0]) - 1
            if index < 0:
                if index == -1:
                    raise SyntaxError(
                        "indices in path predicates are 1-based, not 0-based")
                else:
                    raise SyntaxError("path index >= 1 expected")
        else:
            if predicate[0] != "last":
                raise SyntaxError("unsupported function")
            if signature == "-()-":
                try:
                    index = int(predicate[2]) - 1
                except ValueError:
                    raise SyntaxError("unsupported expression")
            else:
                index = -1
        def select(result):
            for elem in result:
                parent = elem.getparent()
                if parent is None:
                    continue
                try:
                    # FIXME: what if the selector is "*" ?
                    elems = list(parent.iterchildren(elem.tag))
                    if elems[index] is elem:
                        yield elem
                except IndexError:
                    pass
        return select
    raise SyntaxError("invalid predicate")
171,427 | from __future__ import absolute_import
import re
def find(elem, path, namespaces=None):
    """Return the first element matching *path*, or None when nothing matches."""
    for match in iterfind(elem, path, namespaces):
        return match
    return None
def findtext(elem, path, default=None, namespaces=None):
    """Return the text of the first match ('' when it has no text), or *default*."""
    hit = find(elem, path, namespaces)
    return default if hit is None else (hit.text or '')
171,428 | from __future__ import absolute_import
import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import re
def default_markup(text, version):
    """Wrap *text* in a span whose title names the annotating version."""
    title = html_escape(_unicode(version), 1)
    return '<span title="%s">%s</span>' % (title, text)
def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens
def html_annotate_merge_annotations(tokens_old, tokens_new):
"""Merge the annotations from tokens_old into tokens_new, when the
tokens in the new document already existed in the old document.
"""
s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
commands = s.get_opcodes()
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
eq_old = tokens_old[i1:i2]
eq_new = tokens_new[j1:j2]
copy_annotations(eq_old, eq_new)
def compress_tokens(tokens):
"""
Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation
"""
result = [tokens[0]]
for tok in tokens[1:]:
if (not result[-1].post_tags and
not tok.pre_tags and
result[-1].annotation == tok.annotation):
compress_merge_back(result, tok)
else:
result.append(tok)
return result
def markup_serialize_tokens(tokens, markup_func):
"""
Serialize the list of tokens into a list of text chunks, calling
markup_func around text to add annotations.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
html = token.html()
html = markup_func(html, token.annotation)
if token.trailing_whitespace:
html += token.trailing_whitespace
yield html
for post in token.post_tags:
yield post
The provided code snippet includes necessary dependencies for implementing the `html_annotate` function. Write a Python function `def html_annotate(doclist, markup=default_markup)` to solve the following problem:
doclist should be ordered from oldest to newest, like:: >>> version1 = 'Hello World' >>> version2 = 'Goodbye World' >>> print(html_annotate([(version1, 'version 1'), ... (version2, 'version 2')])) <span title="version 2">Goodbye</span> <span title="version 1">World</span> The documents must be *fragments* (str/UTF8 or unicode), not complete documents The markup argument is a function to markup the spans of words. This function is called like markup('Hello', 'version 2'), and returns HTML. The first argument is text and never includes any markup. The default uses a span with a title: >>> print(default_markup('Some Text', 'by Joe')) <span title="by Joe">Some Text</span>
Here is the function:
def html_annotate(doclist, markup=default_markup):
"""
doclist should be ordered from oldest to newest, like::
>>> version1 = 'Hello World'
>>> version2 = 'Goodbye World'
>>> print(html_annotate([(version1, 'version 1'),
... (version2, 'version 2')]))
<span title="version 2">Goodbye</span> <span title="version 1">World</span>
The documents must be *fragments* (str/UTF8 or unicode), not
complete documents
The markup argument is a function to markup the spans of words.
This function is called like markup('Hello', 'version 2'), and
returns HTML. The first argument is text and never includes any
markup. The default uses a span with a title:
>>> print(default_markup('Some Text', 'by Joe'))
<span title="by Joe">Some Text</span>
"""
# The basic strategy we have is to split the documents up into
# logical tokens (which are words with attached markup). We then
# do diffs of each of the versions to track when a token first
# appeared in the document; the annotation attached to the token
# is the version where it first appeared.
tokenlist = [tokenize_annotated(doc, version)
for doc, version in doclist]
cur_tokens = tokenlist[0]
for tokens in tokenlist[1:]:
html_annotate_merge_annotations(cur_tokens, tokens)
cur_tokens = tokens
# After we've tracked all the tokens, we can combine spans of text
# that are adjacent and have the same annotation
cur_tokens = compress_tokens(cur_tokens)
# And finally add markup
result = markup_serialize_tokens(cur_tokens, markup)
return ''.join(result).strip() | doclist should be ordered from oldest to newest, like:: >>> version1 = 'Hello World' >>> version2 = 'Goodbye World' >>> print(html_annotate([(version1, 'version 1'), ... (version2, 'version 2')])) <span title="version 2">Goodbye</span> <span title="version 1">World</span> The documents must be *fragments* (str/UTF8 or unicode), not complete documents The markup argument is a function to markup the spans of words. This function is called like markup('Hello', 'version 2'), and returns HTML. The first argument is text and never includes any markup. The default uses a span with a title: >>> print(default_markup('Some Text', 'by Joe')) <span title="by Joe">Some Text</span> |
171,429 | from __future__ import absolute_import
import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import re
def htmldiff_tokens(html1_tokens, html2_tokens):
""" Does a diff on the tokens themselves, returning a list of text
chunks (not tokens).
"""
# There are several passes as we do the differences. The tokens
# isolate the portion of the content we care to diff; difflib does
# all the actual hard work at that point.
#
# Then we must create a valid document from pieces of both the old
# document and the new document. We generally prefer to take
# markup from the new document, and only do a best effort attempt
# to keep markup from the old document; anything that we can't
# resolve we throw away. Also we try to put the deletes as close
# to the location where we think they would have been -- because
# we are only keeping the markup from the new document, it can be
# fuzzy where in the new document the old text would have gone.
# Again we just do a best effort attempt.
s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
commands = s.get_opcodes()
result = []
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
continue
if command == 'insert' or command == 'replace':
ins_tokens = expand_tokens(html2_tokens[j1:j2])
merge_insert(ins_tokens, result)
if command == 'delete' or command == 'replace':
del_tokens = expand_tokens(html1_tokens[i1:i2])
merge_delete(del_tokens, result)
# If deletes were inserted directly as <del> then we'd have an
# invalid document at this point. Instead we put in special
# markers, and when the complete diffed document has been created
# we try to move the deletes around and resolve any problems.
result = cleanup_delete(result)
return result
def tokenize(html, include_hrefs=True):
"""
Parse the given HTML and returns token objects (words with attached tags).
This parses only the content of a page; anything in the head is
ignored, and the <head> and <body> elements are themselves
optional. The content is then parsed by lxml, which ensures the
validity of the resulting parsed document (though lxml may make
incorrect guesses when the markup is particular bad).
<ins> and <del> tags are also eliminated from the document, as
that gets confusing.
If include_hrefs is true, then the href attribute of <a> tags is
included as a special kind of diffable token."""
if etree.iselement(html):
body_el = html
else:
body_el = parse_html(html, cleanup=True)
# Then we split the document into text chunks for each tag, word, and end tag:
chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
# Finally re-joining them into token objects:
return fixup_chunks(chunks)
def fixup_ins_del_tags(html):
""" Given an html string, move any <ins> or <del> tags inside of any
block-level elements, e.g. transform <ins><p>word</p></ins> to
<p><ins>word</ins></p> """
doc = parse_html(html, cleanup=False)
_fixup_ins_del_tags(doc)
html = serialize_html_fragment(doc, skip_outer=True)
return html
The provided code snippet includes necessary dependencies for implementing the `htmldiff` function. Write a Python function `def htmldiff(old_html, new_html)` to solve the following problem:
Do a diff of the old and new document. The documents are HTML *fragments* (str/UTF8 or unicode), they are not complete documents (i.e., no <html> tag). Returns HTML with <ins> and <del> tags added around the appropriate text. Markup is generally ignored, with the markup from new_html preserved, and possibly some markup from old_html (though it is considered acceptable to lose some of the old markup). Only the words in the HTML are diffed. The exception is <img> tags, which are treated like words, and the href attribute of <a> tags, which are noted inside the tag itself when there are changes.
Here is the function:
def htmldiff(old_html, new_html):
## FIXME: this should take parsed documents too, and use their body
## or other content.
""" Do a diff of the old and new document. The documents are HTML
*fragments* (str/UTF8 or unicode), they are not complete documents
(i.e., no <html> tag).
Returns HTML with <ins> and <del> tags added around the
appropriate text.
Markup is generally ignored, with the markup from new_html
preserved, and possibly some markup from old_html (though it is
considered acceptable to lose some of the old markup). Only the
words in the HTML are diffed. The exception is <img> tags, which
are treated like words, and the href attribute of <a> tags, which
are noted inside the tag itself when there are changes.
"""
old_html_tokens = tokenize(old_html)
new_html_tokens = tokenize(new_html)
result = htmldiff_tokens(old_html_tokens, new_html_tokens)
result = ''.join(result).strip()
return fixup_ins_del_tags(result) | Do a diff of the old and new document. The documents are HTML *fragments* (str/UTF8 or unicode), they are not complete documents (i.e., no <html> tag). Returns HTML with <ins> and <del> tags added around the appropriate text. Markup is generally ignored, with the markup from new_html preserved, and possibly some markup from old_html (though it is considered acceptable to lose some of the old markup). Only the words in the HTML are diffed. The exception is <img> tags, which are treated like words, and the href attribute of <a> tags, which are noted inside the tag itself when there are changes. |
171,430 | from __future__ import absolute_import
import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import re
The provided code snippet includes necessary dependencies for implementing the `_merge_element_contents` function. Write a Python function `def _merge_element_contents(el)` to solve the following problem:
Removes an element, but merges its contents into its place, e.g., given <p>Hi <i>there!</i></p>, if you remove the <i> element you get <p>Hi there!</p>
Here is the function:
def _merge_element_contents(el):
"""
Removes an element, but merges its contents into its place, e.g.,
given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
<p>Hi there!</p>
"""
parent = el.getparent()
text = el.text or ''
if el.tail:
if not len(el):
text += el.tail
else:
if el[-1].tail:
el[-1].tail += el.tail
else:
el[-1].tail = el.tail
index = parent.index(el)
if text:
if index == 0:
previous = None
else:
previous = parent[index-1]
if previous is None:
if parent.text:
parent.text += text
else:
parent.text = text
else:
if previous.tail:
previous.tail += text
else:
previous.tail = text
parent[index:index+1] = el.getchildren() | Removes an element, but merges its contents into its place, e.g., given <p>Hi <i>there!</i></p>, if you remove the <i> element you get <p>Hi there!</p> |
171,431 | from lxml.etree import XPath, ElementBase
from lxml.html import fromstring, XHTML_NAMESPACE
from lxml.html import _forms_xpath, _options_xpath, _nons, _transform_result
from lxml.html import defs
import copy
try:
basestring
except NameError:
# Python 3
basestring = str
def fill_form(
el,
values,
form_id=None,
form_index=None,
):
el = _find_form(el, form_id=form_id, form_index=form_index)
_fill_form(el, values)
def _transform_result(typ, result):
"""Convert the result back into the input type.
"""
if issubclass(typ, bytes):
return tostring(result, encoding='utf-8')
elif issubclass(typ, unicode):
return tostring(result, encoding='unicode')
else:
return result
def fromstring(html, base_url=None, parser=None, **kw):
"""
Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
if isinstance(html, bytes):
is_full_html = _looks_like_full_html_bytes(html)
else:
is_full_html = _looks_like_full_html_unicode(html)
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
if is_full_html:
return doc
# otherwise, lets parse it out...
bodies = doc.findall('body')
if not bodies:
bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
if bodies:
body = bodies[0]
if len(bodies) > 1:
# Somehow there are multiple bodies, which is bad, but just
# smash them into one body
for other_body in bodies[1:]:
if other_body.text:
if len(body):
body[-1].tail = (body[-1].tail or '') + other_body.text
else:
body.text = (body.text or '') + other_body.text
body.extend(other_body)
# We'll ignore tail
# I guess we are ignoring attributes too
other_body.drop_tree()
else:
body = None
heads = doc.findall('head')
if not heads:
heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
if heads:
# Well, we have some sort of structure, so lets keep it all
head = heads[0]
if len(heads) > 1:
for other_head in heads[1:]:
head.extend(other_head)
# We don't care about text or tail in a head
other_head.drop_tree()
return doc
if body is None:
return doc
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
# The body has just one element, so it was probably a single
# element passed in
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body
def fill_form_html(html, values, form_id=None, form_index=None):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
fill_form(doc, values, form_id=form_id, form_index=form_index)
return _transform_result(result_type, doc) | null |
171,432 | from lxml.etree import XPath, ElementBase
from lxml.html import fromstring, XHTML_NAMESPACE
from lxml.html import _forms_xpath, _options_xpath, _nons, _transform_result
from lxml.html import defs
import copy
try:
basestring
except NameError:
# Python 3
basestring = str
def insert_errors(
el,
errors,
form_id=None,
form_index=None,
error_class="error",
error_creator=default_error_creator,
):
def _transform_result(typ, result):
def fromstring(html, base_url=None, parser=None, **kw):
def insert_errors_html(html, values, **kw):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
insert_errors(doc, values, **kw)
return _transform_result(result_type, doc) | null |
171,433 | from __future__ import absolute_import
import copy
import re
import sys
from lxml import etree
from lxml.html import defs
from lxml.html import fromstring, XHTML_NAMESPACE
from lxml.html import xhtml_to_html, _transform_result
_find_image_dataurls = re.compile(
r'data:image/(.+);base64,', re.I).findall
_possibly_malicious_schemes = re.compile(
r'(javascript|jscript|livescript|vbscript|data|about|mocha):',
re.I).findall
_is_unsafe_image_type = re.compile(r"(xml|svg)", re.I).search
def _has_javascript_scheme(s):
safe_image_urls = 0
for image_type in _find_image_dataurls(s):
if _is_unsafe_image_type(image_type):
return True
safe_image_urls += 1
return len(_possibly_malicious_schemes(s)) > safe_image_urls | null |
171,434 | from __future__ import absolute_import
import copy
import re
import sys
from lxml import etree
from lxml.html import defs
from lxml.html import fromstring, XHTML_NAMESPACE
from lxml.html import xhtml_to_html, _transform_result
try:
basestring
except NameError:
basestring = (str, bytes)
def autolink(el, link_regexes=_link_regexes,
avoid_elements=_avoid_elements,
avoid_hosts=_avoid_hosts,
avoid_classes=_avoid_classes):
"""
Turn any URLs into links.
It will search for links identified by the given regular
expressions (by default mailto and http(s) links).
It won't link text in an element in avoid_elements, or an element
with a class in avoid_classes. It won't link to anything with a
host that matches one of the regular expressions in avoid_hosts
(default localhost and 127.0.0.1).
If you pass in an element, the element's tail will not be
substituted, only the contents of the element.
"""
if el.tag in avoid_elements:
return
class_name = el.get('class')
if class_name:
class_name = class_name.split()
for match_class in avoid_classes:
if match_class in class_name:
return
for child in list(el):
autolink(child, link_regexes=link_regexes,
avoid_elements=avoid_elements,
avoid_hosts=avoid_hosts,
avoid_classes=avoid_classes)
if child.tail:
text, tail_children = _link_text(
child.tail, link_regexes, avoid_hosts, factory=el.makeelement)
if tail_children:
child.tail = text
index = el.index(child)
el[index+1:index+1] = tail_children
if el.text:
text, pre_children = _link_text(
el.text, link_regexes, avoid_hosts, factory=el.makeelement)
if pre_children:
el.text = text
el[:0] = pre_children
def _transform_result(typ, result):
"""Convert the result back into the input type.
"""
if issubclass(typ, bytes):
return tostring(result, encoding='utf-8')
elif issubclass(typ, unicode):
return tostring(result, encoding='unicode')
else:
return result
def fromstring(html, base_url=None, parser=None, **kw):
"""
Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
if isinstance(html, bytes):
is_full_html = _looks_like_full_html_bytes(html)
else:
is_full_html = _looks_like_full_html_unicode(html)
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
if is_full_html:
return doc
# otherwise, lets parse it out...
bodies = doc.findall('body')
if not bodies:
bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
if bodies:
body = bodies[0]
if len(bodies) > 1:
# Somehow there are multiple bodies, which is bad, but just
# smash them into one body
for other_body in bodies[1:]:
if other_body.text:
if len(body):
body[-1].tail = (body[-1].tail or '') + other_body.text
else:
body.text = (body.text or '') + other_body.text
body.extend(other_body)
# We'll ignore tail
# I guess we are ignoring attributes too
other_body.drop_tree()
else:
body = None
heads = doc.findall('head')
if not heads:
heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
if heads:
# Well, we have some sort of structure, so lets keep it all
head = heads[0]
if len(heads) > 1:
for other_head in heads[1:]:
head.extend(other_head)
# We don't care about text or tail in a head
other_head.drop_tree()
return doc
if body is None:
return doc
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
# The body has just one element, so it was probably a single
# element passed in
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body
def autolink_html(html, *args, **kw):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
autolink(doc, *args, **kw)
return _transform_result(result_type, doc) | null |
171,435 | from __future__ import absolute_import
import copy
import re
import sys
from lxml import etree
from lxml.html import defs
from lxml.html import fromstring, XHTML_NAMESPACE
from lxml.html import xhtml_to_html, _transform_result
def word_break(el, max_width=40,
avoid_elements=_avoid_word_break_elements,
avoid_classes=_avoid_word_break_classes,
break_character=unichr(0x200b)):
def _transform_result(typ, result):
def fromstring(html, base_url=None, parser=None, **kw):
def word_break_html(html, *args, **kw):
result_type = type(html)
doc = fromstring(html)
word_break(doc, *args, **kw)
return _transform_result(result_type, doc) | null |
171,436 | from .soupparser import convert_tree, parse as _parse
def parse(file, beautifulsoup=None, makeelement=None):
root = _parse(file, beautifulsoup=beautifulsoup, makeelement=makeelement)
return root.getroot() | null |
171,437 | import sys
import string
from html5lib import HTMLParser as _HTMLParser
from html5lib.treebuilders.etree_lxml import TreeBuilder
from lxml import etree
from lxml.html import Element, XHTML_NAMESPACE, _contains_block_level_tag
try:
_strings = basestring
except NameError:
_strings = (bytes, str)
def fragments_fromstring(html, no_leading_text=False,
guess_charset=None, parser=None):
"""Parses several HTML elements, returning a list of elements.
The first item in the list may be a string. If no_leading_text is true,
then it will be an error if there is leading text, and it will always be
a list of only elements.
If `guess_charset` is true, the `chardet` library will perform charset
guessing on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
options = {}
if guess_charset is None and isinstance(html, bytes):
# html5lib does not accept useChardet as an argument, if it
# detected the html argument would produce unicode objects.
guess_charset = False
if guess_charset is not None:
options['useChardet'] = guess_charset
children = parser.parseFragment(html, 'div', **options)
if children and isinstance(children[0], _strings):
if no_leading_text:
if children[0].strip():
raise etree.ParserError('There is leading text: %r' %
children[0])
del children[0]
return children
def Element(*args, **kw):
"""Create a new HTML Element.
This can also be used for XHTML documents.
"""
v = html_parser.makeelement(*args, **kw)
return v
The provided code snippet includes necessary dependencies for implementing the `fragment_fromstring` function. Write a Python function `def fragment_fromstring(html, create_parent=False, guess_charset=None, parser=None)` to solve the following problem:
Parses a single HTML element; it is an error if there is more than one element, or if anything but whitespace precedes or follows the element. If 'create_parent' is true (or is a tag name) then a parent node will be created to encapsulate the HTML in a single element. In this case, leading or trailing text is allowed. If `guess_charset` is true, the `chardet` library will perform charset guessing on the string.
Here is the function:
def fragment_fromstring(html, create_parent=False,
guess_charset=None, parser=None):
"""Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If 'create_parent' is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In
this case, leading or trailing text is allowed.
If `guess_charset` is true, the `chardet` library will perform charset
guessing on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, guess_charset=guess_charset, parser=parser,
no_leading_text=not accept_leading_text)
if create_parent:
if not isinstance(create_parent, _strings):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], _strings):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError('Multiple elements found')
result = elements[0]
if result.tail and result.tail.strip():
raise etree.ParserError('Element followed by text: %r' % result.tail)
result.tail = None
return result | Parses a single HTML element; it is an error if there is more than one element, or if anything but whitespace precedes or follows the element. If 'create_parent' is true (or is a tag name) then a parent node will be created to encapsulate the HTML in a single element. In this case, leading or trailing text is allowed. If `guess_charset` is true, the `chardet` library will perform charset guessing on the string. |
171,438 | import sys
import string
from html5lib import HTMLParser as _HTMLParser
from html5lib.treebuilders.etree_lxml import TreeBuilder
from lxml import etree
from lxml.html import Element, XHTML_NAMESPACE, _contains_block_level_tag
try:
_strings = basestring
except NameError:
_strings = (bytes, str)
def _find_tag(tree, tag):
elem = tree.find(tag)
if elem is not None:
return elem
return tree.find('{%s}%s' % (XHTML_NAMESPACE, tag))
def document_fromstring(html, guess_charset=None, parser=None):
"""
Parse a whole document into a string.
If `guess_charset` is true, or if the input is not Unicode but a
byte string, the `chardet` library will perform charset guessing
on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
options = {}
if guess_charset is None and isinstance(html, bytes):
# html5lib does not accept useChardet as an argument, if it
# detected the html argument would produce unicode objects.
guess_charset = True
if guess_charset is not None:
options['useChardet'] = guess_charset
return parser.parse(html, **options).getroot()
def _contains_block_level_tag(el):
# FIXME: I could do this with XPath, but would that just be
# unnecessarily slow?
for el in el.iter(etree.Element):
if _nons(el.tag) in defs.block_tags:
return True
return False
The provided code snippet includes necessary dependencies for implementing the `fromstring` function. Write a Python function `def fromstring(html, guess_charset=None, parser=None)` to solve the following problem:
Parse the html, returning a single element/document. This tries to minimally parse the chunk of text, without knowing if it is a fragment or a document. 'base_url' will set the document's base_url attribute (and the tree's docinfo.URL) If `guess_charset` is true, or if the input is not Unicode but a byte string, the `chardet` library will perform charset guessing on the string.
Here is the function:
def fromstring(html, guess_charset=None, parser=None):
"""Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
'base_url' will set the document's base_url attribute (and the tree's
docinfo.URL)
If `guess_charset` is true, or if the input is not Unicode but a
byte string, the `chardet` library will perform charset guessing
on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
doc = document_fromstring(html, parser=parser,
guess_charset=guess_charset)
# document starts with doctype or <html>, full document!
start = html[:50]
if isinstance(start, bytes):
# Allow text comparison in python3.
# Decode as ascii, that also covers latin-1 and utf-8 for the
# characters we need.
start = start.decode('ascii', 'replace')
start = start.lstrip().lower()
if start.startswith('<html') or start.startswith('<!doctype'):
return doc
head = _find_tag(doc, 'head')
# if the head is not empty we have a full document
if len(head):
return doc
body = _find_tag(doc, 'body')
# The body has just one element, so it was probably a single
# element passed in
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body | Parse the html, returning a single element/document. This tries to minimally parse the chunk of text, without knowing if it is a fragment or a document. 'base_url' will set the document's base_url attribute (and the tree's docinfo.URL) If `guess_charset` is true, or if the input is not Unicode but a byte string, the `chardet` library will perform charset guessing on the string. |
171,439 | from __future__ import absolute_import
import optparse
import sys
import re
import os
from .diff import htmldiff
def read_file(filename):
if filename == '-':
c = sys.stdin.read()
elif not os.path.exists(filename):
raise OSError(
"Input file %s does not exist" % filename)
else:
with open(filename, 'rb') as f:
c = f.read()
return c | null |
171,440 | from __future__ import absolute_import
import optparse
import sys
import re
import os
from .diff import htmldiff
body_start_re = re.compile(
r"<body.*?>", re.I|re.S)
body_end_re = re.compile(
r"</body.*?>", re.I|re.S)
def split_body(html):
pre = post = ''
match = body_start_re.search(html)
if match:
pre = html[:match.end()]
html = html[match.end():]
match = body_end_re.search(html)
if match:
post = html[match.start():]
html = html[:match.start()]
return pre, html, post | null |
171,441 | from __future__ import absolute_import
import optparse
import sys
import re
import os
from .diff import htmldiff
def annotate(options, args):
print("Not yet implemented")
sys.exit(1) | null |
171,442 | from lxml.builder import ElementMaker
from lxml.html import html_parser
def CLASS(v): return {'class': v} | null |
171,443 | from lxml.builder import ElementMaker
from lxml.html import html_parser
def FOR(v): return {'for': v} | null |
171,444 | import re
from lxml import etree, html
def _parse(source, beautifulsoup, makeelement, **bsargs):
if beautifulsoup is None:
beautifulsoup = BeautifulSoup
if hasattr(beautifulsoup, "HTML_ENTITIES"): # bs3
if 'convertEntities' not in bsargs:
bsargs['convertEntities'] = 'html'
if hasattr(beautifulsoup, "DEFAULT_BUILDER_FEATURES"): # bs4
if 'features' not in bsargs:
bsargs['features'] = 'html.parser' # use Python html parser
tree = beautifulsoup(source, **bsargs)
root = _convert_tree(tree, makeelement)
# from ET: wrap the document in a html root element, if necessary
if len(root) == 1 and root[0].tag == "html":
return root[0]
root.tag = "html"
return root
The provided code snippet includes necessary dependencies for implementing the `fromstring` function. Write a Python function `def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs)` to solve the following problem:
Parse a string of HTML data into an Element tree using the BeautifulSoup parser. Returns the root ``<html>`` Element of the tree. You can pass a different BeautifulSoup parser through the `beautifulsoup` keyword, and a diffent Element factory function through the `makeelement` keyword. By default, the standard ``BeautifulSoup`` class and the default factory of `lxml.html` are used.
Here is the function:
def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs):
"""Parse a string of HTML data into an Element tree using the
BeautifulSoup parser.
Returns the root ``<html>`` Element of the tree.
You can pass a different BeautifulSoup parser through the
`beautifulsoup` keyword, and a diffent Element factory function
through the `makeelement` keyword. By default, the standard
``BeautifulSoup`` class and the default factory of `lxml.html` are
used.
"""
return _parse(data, beautifulsoup, makeelement, **bsargs) | Parse a string of HTML data into an Element tree using the BeautifulSoup parser. Returns the root ``<html>`` Element of the tree. You can pass a different BeautifulSoup parser through the `beautifulsoup` keyword, and a diffent Element factory function through the `makeelement` keyword. By default, the standard ``BeautifulSoup`` class and the default factory of `lxml.html` are used. |
171,445 | import re
from lxml import etree, html
def _parse(source, beautifulsoup, makeelement, **bsargs):
    """Run BeautifulSoup over *source* and convert the soup to lxml,
    guaranteeing a single ``<html>`` root element on the result.
    """
    if beautifulsoup is None:
        beautifulsoup = BeautifulSoup
    # bs3 exposes HTML_ENTITIES: ask it to convert entities by default.
    if hasattr(beautifulsoup, "HTML_ENTITIES"):
        bsargs.setdefault('convertEntities', 'html')
    # bs4 exposes DEFAULT_BUILDER_FEATURES: default to the stdlib parser.
    if hasattr(beautifulsoup, "DEFAULT_BUILDER_FEATURES"):
        bsargs.setdefault('features', 'html.parser')
    soup = beautifulsoup(source, **bsargs)
    root = _convert_tree(soup, makeelement)
    # from ET: wrap the document in a html root element, if necessary
    if len(root) == 1 and root[0].tag == "html":
        return root[0]
    root.tag = "html"
    return root
The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse(file, beautifulsoup=None, makeelement=None, **bsargs)` to solve the following problem:
Parse a file into an ElementTree using the BeautifulSoup parser. You can pass a different BeautifulSoup parser through the `beautifulsoup` keyword, and a different Element factory function through the `makeelement` keyword. By default, the standard ``BeautifulSoup`` class and the default factory of `lxml.html` are used.
Here is the function:
def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
    """Parse a file into an ElementTree using the BeautifulSoup parser.

    *file* may be an open file-like object (anything with a ``read``
    method) or a filename/path, which is opened -- and closed -- here.

    You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
    through the `makeelement` keyword.  By default, the standard
    ``BeautifulSoup`` class and the default factory of `lxml.html` are
    used.
    """
    opened_here = False
    if not hasattr(file, 'read'):
        file = open(file)
        opened_here = True
    try:
        root = _parse(file, beautifulsoup, makeelement, **bsargs)
    finally:
        # Only close files we opened ourselves; a caller-provided
        # file-like object remains the caller's responsibility.
        # (The original leaked the handle entirely.)
        if opened_here:
            file.close()
    return etree.ElementTree(root)
171,446 | import re
from lxml import etree, html
def _convert_tree(beautiful_soup_tree, makeelement):
    """Convert a BeautifulSoup tree into a single lxml Element tree.

    Returns the root Element: the soup's own <html> element when one
    exists, otherwise a synthetic root wrapping all top-level elements.
    Content before/after the root (doctype, comments, PIs) is attached
    as lxml siblings, and any DOCTYPE declaration found is reflected
    into the result tree's docinfo.
    """
    if makeelement is None:
        # Fall back to the default lxml.html element factory.
        makeelement = html.html_parser.makeelement
    # Split the tree into three parts:
    # i) everything before the root element: document type
    # declaration, comments, processing instructions, whitespace
    # ii) the root(s),
    # iii) everything after the root: comments, processing
    # instructions, whitespace
    first_element_idx = last_element_idx = None
    html_root = declaration = None
    for i, e in enumerate(beautiful_soup_tree):
        if isinstance(e, Tag):
            if first_element_idx is None:
                first_element_idx = i
            last_element_idx = i
            # Remember an explicit <html> element so we can reuse it
            # as the single root below.
            if html_root is None and e.name and e.name.lower() == 'html':
                html_root = e
        elif declaration is None and isinstance(e, _DECLARATION_OR_DOCTYPE):
            declaration = e
    # For a nice, well-formatted document, the variable roots below is
    # a list consisting of a single <html> element. However, the document
    # may be a soup like '<meta><head><title>Hello</head><body>Hi
    # all<\p>'. In this example roots is a list containing meta, head
    # and body elements.
    if first_element_idx is None:
        pre_root = post_root = []
        roots = beautiful_soup_tree.contents
    else:
        pre_root = beautiful_soup_tree.contents[:first_element_idx]
        roots = beautiful_soup_tree.contents[first_element_idx:last_element_idx+1]
        post_root = beautiful_soup_tree.contents[last_element_idx+1:]
    # Reorganize so that there is one <html> root...
    if html_root is not None:
        # ... use existing one if possible, ...
        i = roots.index(html_root)
        html_root.contents = roots[:i] + html_root.contents + roots[i+1:]
    else:
        # ... otherwise create a new one.
        html_root = _PseudoTag(roots)
    convert_node = _init_node_converters(makeelement)
    # Process pre_root
    res_root = convert_node(html_root)
    prev = res_root
    # Walk pre-root nodes backwards so addprevious() keeps document order.
    for e in reversed(pre_root):
        converted = convert_node(e)
        if converted is not None:
            prev.addprevious(converted)
            prev = converted
    # ditto for post_root
    prev = res_root
    for e in post_root:
        converted = convert_node(e)
        if converted is not None:
            prev.addnext(converted)
            prev = converted
    if declaration is not None:
        try:
            # bs4 provides full Doctype string
            doctype_string = declaration.output_ready()
        except AttributeError:
            doctype_string = declaration.string
        match = _parse_doctype_declaration(doctype_string)
        if not match:
            # Something is wrong if we end up in here. Since soupparser should
            # tolerate errors, do not raise Exception, just let it pass.
            pass
        else:
            external_id, sys_uri = match.groups()
            docinfo = res_root.getroottree().docinfo
            # strip quotes and update DOCTYPE values (any of None, '', '...')
            docinfo.public_id = external_id and external_id[1:-1]
            docinfo.system_url = sys_uri and sys_uri[1:-1]
    return res_root
The provided code snippet includes necessary dependencies for implementing the `convert_tree` function. Write a Python function `def convert_tree(beautiful_soup_tree, makeelement=None)` to solve the following problem:
Convert a BeautifulSoup tree to a list of Element trees. Returns a list instead of a single root Element to support HTML-like soup with more than one root element. You can pass a different Element factory through the `makeelement` keyword.
Here is the function:
def convert_tree(beautiful_soup_tree, makeelement=None):
    """Convert a BeautifulSoup tree to a list of Element trees.

    Returns a list instead of a single root Element to support
    HTML-like soup with more than one root element.

    You can pass a different Element factory through the `makeelement`
    keyword.
    """
    root = _convert_tree(beautiful_soup_tree, makeelement)
    # Materialize the children before detaching them from the temporary
    # root (``getchildren()`` is deprecated; ``list(root)`` is the
    # supported equivalent and also gives us a stable snapshot to
    # iterate while mutating the root).
    children = list(root)
    for child in children:
        root.remove(child)
    return children
171,447 | from lxml import etree
def default_loader(href, parse, encoding=None):
    """Default XInclude loader: read *href* as XML or as text.

    With ``parse == "xml"`` the file is parsed with lxml and its root
    element is returned; otherwise the raw bytes are read and decoded
    using *encoding* (UTF-8 when not given).
    """
    # Context manager guarantees the handle is closed even if parsing
    # or decoding raises (the original leaked the file on error).
    with open(href, 'rb') as file:
        if parse == "xml":
            return etree.parse(file).getroot()
        data = file.read()
    if not encoding:
        encoding = 'utf-8'
    return data.decode(encoding)
171,448 | from lxml import etree
# Default bound on nested <xi:include> expansion depth.
DEFAULT_MAX_INCLUSION_DEPTH = 6
def _include(elem, loader=None, base_url=None,
             max_depth=DEFAULT_MAX_INCLUSION_DEPTH, _parent_hrefs=None):
    """Recursively expand XInclude directives below *elem* in place.

    A user *loader* is adapted to the internal loader protocol;
    otherwise the lxml default loader is used.  *_parent_hrefs* carries
    the hrefs already on the current inclusion path so recursive
    includes are detected.  Returns the (possibly replaced) root node,
    or replacement text when the root element itself was an include.
    """
    if loader is not None:
        load_include = _wrap_et_loader(loader)
    else:
        load_include = _lxml_default_loader
    if _parent_hrefs is None:
        _parent_hrefs = set()
    parser = elem.getroottree().parser
    # Snapshot the include elements first: the tree is mutated below
    # while we process them.
    include_elements = list(
        elem.iter(XINCLUDE_ITER_TAG))
    for e in include_elements:
        if e.tag == XINCLUDE_INCLUDE:
            # process xinclude directive
            href = urljoin(base_url, e.get("href"))
            parse = e.get("parse", "xml")
            parent = e.getparent()
            if parse == "xml":
                if href in _parent_hrefs:
                    raise FatalIncludeError(
                        "recursive include of %r detected" % href
                        )
                if max_depth == 0:
                    raise LimitedRecursiveIncludeError(
                        "maximum xinclude depth reached when including file %s" % href)
                node = load_include(href, parse, parser=parser)
                if node is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                        )
                # Expand includes inside the included document too, with
                # this href added to the recursion guard.
                node = _include(node, loader, href, max_depth - 1, {href} | _parent_hrefs)
                if e.tail:
                    node.tail = (node.tail or "") + e.tail
                if parent is None:
                    return node # replaced the root node!
                parent.replace(e, node)
            elif parse == "text":
                text = load_include(href, parse, encoding=e.get("encoding"))
                if text is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                        )
                predecessor = e.getprevious()
                if predecessor is not None:
                    # Splice the included text onto the previous sibling's tail.
                    predecessor.tail = (predecessor.tail or "") + text
                elif parent is None:
                    return text # replaced the root node!
                else:
                    parent.text = (parent.text or "") + text + (e.tail or "")
                parent.remove(e)
            else:
                raise FatalIncludeError(
                    "unknown parse type in xi:include tag (%r)" % parse
                )
        elif e.tag == XINCLUDE_FALLBACK:
            parent = e.getparent()
            # Fallbacks are only validated for placement here; they are
            # acted on by the loader when the include itself fails.
            if parent is not None and parent.tag != XINCLUDE_INCLUDE:
                raise FatalIncludeError(
                    "xi:fallback tag must be child of xi:include (%r)" % e.tag
                    )
        else:
            raise FatalIncludeError(
                "Invalid element found in XInclude namespace (%r)" % e.tag
                )
    return elem
def include(elem, loader=None, base_url=None,
            max_depth=DEFAULT_MAX_INCLUSION_DEPTH):
    """Expand XInclude directives in *elem* (an Element or ElementTree).

    ``max_depth=None`` means unlimited nesting; negative values are
    rejected.  When *base_url* is not given, it is taken from the
    tree's docinfo where available.
    """
    if max_depth is None:
        max_depth = -1  # internal marker for "no limit"
    elif max_depth < 0:
        raise ValueError("expected non-negative depth or None for 'max_depth', got %r" % max_depth)
    is_tree = hasattr(elem, 'getroot')
    if base_url is None:
        tree = elem if is_tree else elem.getroottree()
        if is_tree:
            elem = elem.getroot()
        if hasattr(tree, 'docinfo'):
            base_url = tree.docinfo.URL
    elif is_tree:
        elem = elem.getroot()
    _include(elem, loader, base_url, max_depth)
171,449 | from __future__ import absolute_import
from xml.sax.handler import ContentHandler
from lxml import etree
from lxml.etree import ElementTree, SubElement
from lxml.etree import Comment, ProcessingInstruction
def _getNsTag(tag):
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return None, tag | null |
171,450 | from __future__ import absolute_import
from xml.sax.handler import ContentHandler
from lxml import etree
from lxml.etree import ElementTree, SubElement
from lxml.etree import Comment, ProcessingInstruction
class ElementTreeProducer(object):
    """Produces SAX events for an element and children.

    Wraps an Element or ElementTree and replays it against a SAX
    ContentHandler via :meth:`saxify`.
    """
    def __init__(self, element_or_tree, content_handler):
        # Accept either an ElementTree (unwrap to its root) or an Element.
        try:
            element = element_or_tree.getroot()
        except AttributeError:
            element = element_or_tree
        self._element = element
        self._content_handler = content_handler
        from xml.sax.xmlreader import AttributesNSImpl as attr_class
        self._attr_class = attr_class
        # Shared empty attribute object, reused for attribute-less elements.
        self._empty_attributes = attr_class({}, {})
    def saxify(self):
        """Fire startDocument .. endDocument events for the whole tree."""
        self._content_handler.startDocument()
        element = self._element
        if hasattr(element, 'getprevious'):
            # lxml elements know their siblings: replay processing
            # instructions that precede the root, in document order
            # (they are collected backwards, hence the reversal below).
            siblings = []
            sibling = element.getprevious()
            while getattr(sibling, 'tag', None) is ProcessingInstruction:
                siblings.append(sibling)
                sibling = sibling.getprevious()
            for sibling in siblings[::-1]:
                self._recursive_saxify(sibling, {})
        self._recursive_saxify(element, {})
        if hasattr(element, 'getnext'):
            # ... and processing instructions that follow the root.
            sibling = element.getnext()
            while getattr(sibling, 'tag', None) is ProcessingInstruction:
                self._recursive_saxify(sibling, {})
                sibling = sibling.getnext()
        self._content_handler.endDocument()
    def _recursive_saxify(self, element, parent_nsmap):
        """Emit SAX events for *element*, its subtree, and its tail text."""
        content_handler = self._content_handler
        tag = element.tag
        if tag is Comment or tag is ProcessingInstruction:
            # Comments have no SAX event; PIs are forwarded directly.
            if tag is ProcessingInstruction:
                content_handler.processingInstruction(
                    element.target, element.text)
            tail = element.tail
            if tail:
                content_handler.characters(tail)
            return
        element_nsmap = element.nsmap
        new_prefixes = []
        if element_nsmap != parent_nsmap:
            # There have been updates to the namespace
            for prefix, ns_uri in element_nsmap.items():
                if parent_nsmap.get(prefix) != ns_uri:
                    new_prefixes.append( (prefix, ns_uri) )
        attribs = element.items()
        if attribs:
            # Build the (uri, local) -> value and -> qname maps expected
            # by AttributesNSImpl.
            attr_values = {}
            attr_qnames = {}
            for attr_ns_name, value in attribs:
                attr_ns_tuple = _getNsTag(attr_ns_name)
                attr_values[attr_ns_tuple] = value
                attr_qnames[attr_ns_tuple] = self._build_qname(
                    attr_ns_tuple[0], attr_ns_tuple[1], element_nsmap,
                    preferred_prefix=None, is_attribute=True)
            sax_attributes = self._attr_class(attr_values, attr_qnames)
        else:
            sax_attributes = self._empty_attributes
        ns_uri, local_name = _getNsTag(tag)
        qname = self._build_qname(
            ns_uri, local_name, element_nsmap, element.prefix, is_attribute=False)
        for prefix, uri in new_prefixes:
            content_handler.startPrefixMapping(prefix, uri)
        content_handler.startElementNS(
            (ns_uri, local_name), qname, sax_attributes)
        text = element.text
        if text:
            content_handler.characters(text)
        for child in element:
            self._recursive_saxify(child, element_nsmap)
        content_handler.endElementNS((ns_uri, local_name), qname)
        for prefix, uri in new_prefixes:
            content_handler.endPrefixMapping(prefix)
        tail = element.tail
        if tail:
            content_handler.characters(tail)
    def _build_qname(self, ns_uri, local_name, nsmap, preferred_prefix, is_attribute):
        """Return the qualified name for (*ns_uri*, *local_name*) under *nsmap*.

        Elements prefer the element's own prefix; otherwise the lowest
        matching prefix is chosen, falling back to the default namespace.
        """
        if ns_uri is None:
            return local_name
        if not is_attribute and nsmap.get(preferred_prefix) == ns_uri:
            prefix = preferred_prefix
        else:
            # Pick the first matching prefix, in alphabetical order.
            candidates = [
                pfx for (pfx, uri) in nsmap.items()
                if pfx is not None and uri == ns_uri
            ]
            prefix = (
                candidates[0] if len(candidates) == 1
                else min(candidates) if candidates
                else None
            )
        if prefix is None:
            # Default namespace
            return local_name
        return prefix + ':' + local_name
The provided code snippet includes necessary dependencies for implementing the `saxify` function. Write a Python function `def saxify(element_or_tree, content_handler)` to solve the following problem:
One-shot helper to generate SAX events from an XML tree and fire them against a SAX ContentHandler.
Here is the function:
def saxify(element_or_tree, content_handler):
    """One-shot helper to generate SAX events from an XML tree and fire
    them against a SAX ContentHandler.
    """
    producer = ElementTreeProducer(element_or_tree, content_handler)
    return producer.saxify()
171,451 | from __future__ import absolute_import
from . import etree
def _make_lower_case(context, s):
return s.lower() | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.