text stringlengths 0 1.05M | meta dict |
|---|---|
__all__ = [
'deps',
]
import collections
import logging
import urllib.parse
from pathlib import Path
from tempfile import TemporaryDirectory
from garage import apps
from garage import scripts
LOG = logging.getLogger(__name__)
# TODO: Use typing.NamedTuple and the new annotation syntax when all
# systems are upgraded to Python 3.6
# Describes one installable external package; instances are created via
# the ``define_package`` decorator below and looked up by ``install``.
Package = collections.namedtuple('Package', [
    # Registry key; taken from the decorated install function's __name__.
    'name',
    # TODO: Support multiple versions
    'version',
    # Download location of the release tarball.
    'uri',
    # Expected tarball checksum, in "algo-hexdigest" form.
    'checksum',
    # Passed to tar's --strip-components when extracting.
    'strip_components',
    # Callable invoked (with the Package itself) to perform installation.
    'install',
])
# Global registry: package name -> Package.  Populated by define_package.
PACKAGES = {}
def define_package(**package_fields):
    """Decorator factory registering an install function as a Package.

    The decorated function's name becomes the package name; all other
    Package fields are supplied as keyword arguments to this factory.
    """
    def register(install_func):
        entry = Package(
            name=install_func.__name__,
            install=install_func,
            **package_fields,
        )
        PACKAGES[entry.name] = entry
        return install_func
    return register
@define_package(
    version='1.30.0',
    uri='https://github.com/rkt/rkt/releases/download/v1.30.0/rkt-v1.30.0.tar.gz',
    checksum='sha512-f5451234761825380179122a73799864c155b68cc52ce204e4361f60b87bdf8a48fb9c9b991d7f600d8bf8948c4bb2ed118bb5874eb9fc7699620913c2ace754',
    strip_components=1,
)
def rkt(package):
    """Install rkt from the extracted release tree (assumed to be cwd)."""
    if Path('/usr/bin/rkt').exists():
        # Deliberately only a warning; installation proceeds regardless.
        LOG.warning('attempt to overwrite /usr/bin/rkt')
    commands = [
        # Don't install api and metadata service for now.
        ['cp', 'init/systemd/rkt-gc.service', '/lib/systemd/system'],
        ['cp', 'init/systemd/rkt-gc.timer', '/lib/systemd/system'],
        ['cp', 'init/systemd/tmpfiles.d/rkt.conf', '/usr/lib/tmpfiles.d'],
        ['./scripts/setup-data-dir.sh'],
        # Install rkt only if everything above succeeds.
        ['cp', 'rkt', '/usr/bin'],
        # Fetch stage 1.
        ['rkt', 'trust',
         '--prefix', 'coreos.com/rkt/stage1-coreos',
         '--skip-fingerprint-review'],
        ['rkt', 'fetch', 'coreos.com/rkt/stage1-coreos:' + package.version],
    ]
    with scripts.using_sudo():
        for command in commands:
            scripts.execute(command)
        scripts.systemctl_enable('rkt-gc.timer')
        scripts.systemctl_start('rkt-gc.timer')
@apps.with_prog('list')
@apps.with_help('list supported external packages')
def list_(_):
    """List supported external packages."""
    # Keys are unique, so sorting the (name, package) pairs never has to
    # compare two Package values.
    for name, package in sorted(PACKAGES.items()):
        print('%s:%s' % (name, package.version))
    return 0
@apps.with_help('install external package')
@apps.with_argument(
    '--tarball', metavar='PATH',
    help='use local tarball instead',
)
@apps.with_argument(
    'package',
    help='choose package (format: "name:version")',
)
def install(args):
    """Install external package."""
    # Robustness fix: a bare "name" argument used to crash with an
    # unpacking ValueError; report a proper usage error instead.
    if ':' not in args.package:
        raise RuntimeError(
            'expect package format "name:version": %s' % args.package)
    package_name, package_version = args.package.split(':', maxsplit=1)
    package = PACKAGES.get(package_name)
    if package is None:
        raise RuntimeError('unknown package: %s' % args.package)
    # "latest" always matches; otherwise only the single pinned version
    # is supported.
    if package_version != 'latest' and package_version != package.version:
        raise RuntimeError('unsupported package version: %s' % args.package)
    with TemporaryDirectory() as staging_dir:
        staging_dir = Path(staging_dir)
        if args.tarball:
            tarball_path = scripts.ensure_file(Path(args.tarball).resolve())
        else:
            # Name the downloaded file after the last path segment of
            # the package URI.
            tarball_path = urllib.parse.urlparse(package.uri).path
            tarball_path = staging_dir / Path(tarball_path).name
            scripts.wget(package.uri, tarball_path)
        # Checksum is verified for local tarballs, too.
        scripts.ensure_checksum(tarball_path, package.checksum)
        with scripts.directory(staging_dir):
            if package.strip_components > 0:
                tar_extra_flags = [
                    '--strip-components', package.strip_components,
                ]
            else:
                tar_extra_flags = []
            scripts.tar_extract(tarball_path, tar_extra_flags=tar_extra_flags)
            # Run the package-specific install step from inside the
            # extracted tree.
            package.install(package)
    return 0
@apps.with_help('manage external dependencies')
@apps.with_defaults(no_locking_required=True)
@apps.with_apps(
    'operation', 'operation on external dependencies',
    list_,
    install,
)
def deps(args):
    """\
    Manage external dependencies that will not be installed from distro
    package manager.
    """
    # Dispatch to the sub-command (list_ or install) chosen on the CLI;
    # the apps framework binds it to args.operation.
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/onboard/deps.py",
"copies": "1",
"size": "4137",
"license": "mit",
"hash": 212621806671744420,
"line_mean": 29.197080292,
"line_max": 151,
"alpha_frac": 0.645636935,
"autogenerated": false,
"ratio": 3.6225919439579686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47682288789579685,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'DictBlocker',
'ForeverBlocker',
'TaskCompletionBlocker',
'TimeoutBlocker',
]
import heapq
from g1.bases.assertions import ASSERT
from . import tasks
class BlockerBase:
    """Abstract blocker interface.

    This tracks which-task-is-blocked-by-what relationship.
    """

    def __bool__(self):
        # True when at least one task is currently blocked.
        raise NotImplementedError

    def __len__(self):
        # Number of currently-blocked tasks.
        raise NotImplementedError

    def __iter__(self):
        # Iterate over the currently-blocked tasks.
        raise NotImplementedError

    def block(self, source, task):
        """Register that ``task`` is blocked by ``source``."""
        raise NotImplementedError

    def unblock(self, source):
        """Unblock all ``task`` blocked by ``source``.

        Return all unblocked tasks.
        """
        raise NotImplementedError

    def cancel(self, task):
        """Cancel blocking on ``task``.

        Return source or true if ``task`` actually cancelled.
        """
        raise NotImplementedError
class ForeverBlocker(BlockerBase):
    """Blocker that never unblocks.

    Tasks stay blocked until individually cancelled; sources are ignored.
    """

    def __init__(self):
        self._tasks = set()

    def __bool__(self):
        return len(self._tasks) > 0

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return iter(self._tasks)

    def block(self, source, task):
        del source  # Unused.
        self._tasks.add(task)

    def unblock(self, source):
        # Nothing is ever unblocked by a source.
        return ()

    def cancel(self, task):
        # EAFP removal: True only if the task was actually tracked.
        try:
            self._tasks.remove(task)
        except KeyError:
            return False
        return True
class DictBlocker(BlockerBase):
    """Blocker implemented by ``dict``.

    NOTE: It cannot track a task being blocked by more than one source.
    """

    def __init__(self):
        self._task_to_source = {}
        # Reverse look-up table for faster ``unblock``.
        self._source_to_tasks = {}

    def __bool__(self):
        return bool(self._task_to_source)

    def __len__(self):
        return len(self._task_to_source)

    def __iter__(self):
        return iter(self._task_to_source)

    def block(self, source, task):
        ASSERT.not_none(source)
        ASSERT.not_in(task, self._task_to_source)
        self._task_to_source[task] = source
        # Keep the reverse table in sync.
        self._source_to_tasks.setdefault(source, set()).add(task)

    def unblock(self, source):
        released = self._source_to_tasks.pop(source, ())
        for blocked_task in released:
            del self._task_to_source[blocked_task]
        return released

    def cancel(self, task):
        source = self._task_to_source.pop(task, None)
        if source is None:
            return None
        peers = self._source_to_tasks[source]
        peers.discard(task)
        if not peers:
            # Last task for this source; drop the reverse entry.
            del self._source_to_tasks[source]
        return source
class TaskCompletionBlocker(DictBlocker):
    """Track all tasks blocked in ``join`` calls."""

    def block(self, source, task):
        """Record that ``task`` is joining on ``source`` task.

        Joining a completed task is a caller error (asserted below).
        """
        ASSERT.isinstance(source, tasks.Task)
        ASSERT.is_not(source, task)  # A task can't join on itself.
        ASSERT.false(source.is_completed())
        return super().block(source, task)
class TimeoutBlocker(BlockerBase):
    """Blocker keyed by numeric deadlines, backed by a min-heap.

    Cancellation is lazy: cancelled tasks are only removed from the
    membership set; their heap entries are skipped during ``unblock``.
    """

    class Item:

        __slots__ = ('source', 'task')

        def __init__(self, source, task):
            self.source = source
            self.task = task

        def __lt__(self, other):
            # Heap order: earlier deadline first.
            return self.source < other.source

    def __init__(self):
        self._tasks = set()
        self._queue = []

    def __bool__(self):
        return bool(self._tasks)

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return iter(self._tasks)

    def block(self, source, task):
        ASSERT.isinstance(source, (int, float))
        ASSERT.not_in(task, self._tasks)
        heapq.heappush(self._queue, self.Item(source, task))
        self._tasks.add(task)

    def unblock(self, source):
        expired = []
        queue = self._queue
        while queue and queue[0].source <= source:
            candidate = heapq.heappop(queue).task
            # Skip entries whose task was cancelled in the meantime.
            if candidate in self._tasks:
                expired.append(candidate)
                self._tasks.discard(candidate)
        return expired

    def cancel(self, task):
        # Heap entry is left behind on purpose (lazy deletion).
        if task not in self._tasks:
            return False
        self._tasks.discard(task)
        return True

    def get_min_timeout(self, now):
        if not self._queue:
            return None
        return self._queue[0].source - now
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/kernels/g1/asyncs/kernels/blockers.py",
"copies": "1",
"size": "4676",
"license": "mit",
"hash": -7659171392564091000,
"line_mean": 23.8723404255,
"line_max": 71,
"alpha_frac": 0.5660821215,
"autogenerated": false,
"ratio": 3.9863597612958226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 188
} |
__all__ = ['ndimage_to_list',
'list_to_ndimage']
import numpy as np
# import itk as itk
from ..core import ants_image as iio
from ..core import ants_image_io as iio2
from .. import utils
def list_to_ndimage( image, image_list ):
    """
    Merge list of multiple scalar ANTsImage types of dimension into one
    ANTsImage of dimension plus one

    ANTsR function: `mergeListToNDImage`

    Arguments
    ---------
    image : ANTsImage
        target image space; supplies spacing/origin/direction of the result
    image_list : list/tuple of ANTsImage types
        scalar images to merge into target image space

    Returns
    -------
    ANTsImage

    Raises
    ------
    ValueError
        if any list element is not an ANTsImage, or pixeltypes disagree

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> image2 = ants.image_read(ants.get_ants_data('r16'))
    >>> imageTar = ants.make_image( ( *image2.shape, 2 ) )
    >>> image3 = ants.list_to_ndimage( imageTar, [image,image2])
    >>> image3.dimension == 3
    """
    inpixeltype = image_list[0].pixeltype
    dimension = image_list[0].dimension
    # Loop-invariant target check, hoisted out of the per-element loop.
    if image.pixeltype != inpixeltype:
        raise ValueError('all images must have the same pixeltype')
    for imageL in image_list:
        if not isinstance(imageL, iio.ANTsImage):
            raise ValueError('list may only contain ANTsImage objects')
        # BUG FIX: previously compared ``image.pixeltype`` (the target,
        # identical on every iteration), so mismatched list elements were
        # never caught.  Compare each element instead.
        if imageL.pixeltype != inpixeltype:
            raise ValueError('all images must have the same pixeltype')
    dimensionout = ( *image_list[0].shape, len( image_list ) )
    newImage = iio2.make_image(
        dimensionout,
        spacing = iio.get_spacing( image ),
        origin = iio.get_origin( image ),
        direction = iio.get_direction( image ),
        pixeltype = inpixeltype
    )
    # FIXME - should implement paste image filter from ITK
    for x in range( len( image_list ) ):
        if dimension == 2:
            newImage[:,:,x] = image_list[x][:,:]
        if dimension == 3:
            newImage[:,:,:,x] = image_list[x][:,:,:]
    return newImage
def ndimage_to_list(image):
    """
    Split a n dimensional ANTsImage into a list
    of n-1 dimensional ANTsImages

    Arguments
    ---------
    image : ANTsImage
        n-dimensional image to split

    Returns
    -------
    list of ANTsImage types

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> image2 = ants.image_read(ants.get_ants_data('r16'))
    >>> imageTar = ants.make_image( ( *image2.shape, 2 ) )
    >>> image3 = ants.list_to_ndimage( imageTar, [image,image2])
    >>> image3.dimension == 3
    >>> images_unmerged = ants.ndimage_to_list( image3 )
    >>> len(images_unmerged) == 2
    >>> images_unmerged[0].dimension == 2
    """
    dimension = image.dimension
    # Split along the last axis; each slice keeps the leading geometry.
    nSections = image.shape[ dimension - 1 ]
    subdimension = dimension - 1
    suborigin = iio.get_origin( image )[0:subdimension]
    subspacing = iio.get_spacing( image )[0:subdimension]
    # Restrict the direction matrix to the leading (n-1)x(n-1) block.
    subdirection = np.eye( subdimension )
    for i in range( subdimension ):
        subdirection[i,:] = iio.get_direction( image )[i,0:subdimension]
    imagelist = []
    for i in range( nSections ):
        img = utils.slice_image( image, axis = subdimension, idx = i )
        iio.set_spacing( img, subspacing )
        iio.set_origin( img, suborigin )
        iio.set_direction( img, subdirection )
        imagelist.append( img )
    return imagelist
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/ndimage_to_list.py",
"copies": "1",
"size": "3400",
"license": "apache-2.0",
"hash": -5701864576605256000,
"line_mean": 29.0884955752,
"line_max": 72,
"alpha_frac": 0.6132352941,
"autogenerated": false,
"ratio": 3.549060542797495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4662295836897495,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"dispatcher",
]
import asyncore
import inotify
class dispatcher(asyncore.file_dispatcher):
    """Asyncore dispatcher wrapping an inotify event queue.

    Subclasses can monitor inotify watch events by overriding the
    handle_watch(event) method.
    """

    def __init__(self, flags=0, bufsize=65536, map=None):
        """Initialize an inotify event queue and register it with
        asyncore.  flags is passed to inotify.init(); bufsize caps a
        single read of the event buffer."""
        asyncore.file_dispatcher.__init__(self, inotify.init(flags), map)
        self.bufsize = bufsize

    def add_watch(self, name, mask):
        """Calls inotify.add_watch() for the event queue."""
        return inotify.add_watch(self.socket.fileno(), name, mask)

    def rm_watch(self, wd):
        """Calls inotify.rm_watch() for the event queue."""
        inotify.rm_watch(self.socket.fileno(), wd)

    def handle_watch(self, event):
        """Process your watch events here.  event is an inotify.event
        class instance."""
        self.log_info("unhandled watch event", "warning")

    def handle_read(self):
        data = self.recv(self.bufsize)
        if not data:
            # EOF on the inotify descriptor; tear down.
            self.close()
            return
        for event in inotify.unpack_events(data):
            self.handle_watch(event)

    def writable(self):
        # We never queue outbound data.
        return False
| {
"repo_name": "tsavola/pyinotify-basic",
"path": "inotify/async.py",
"copies": "1",
"size": "1352",
"license": "mit",
"hash": 8034155994563019000,
"line_mean": 27.1666666667,
"line_max": 70,
"alpha_frac": 0.6013313609,
"autogenerated": false,
"ratio": 3.907514450867052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
__all__ = [
    "dmap_data_types", "dmap_names", "dmap_reverse_data_types",
    "dmap_code_types"]
# Maps each 4-character DMAP/DAAP content code to a tuple of
# (fully-qualified name, numeric type id).  The type ids are translated
# to format codes by ``dmap_data_types`` defined below.
dmap_code_types = {
    "abal": ("daap.browsealbumlisting", 12),
    "abar": ("daap.browseartistlisting", 12),
    "abcp": ("daap.browsecomposerlisting", 12),
    "abgn": ("daap.browsegenrelisting", 12),
    "abpl": ("daap.baseplaylist", 1),
    "abro": ("daap.databasebrowse", 12),
    "adbs": ("daap.databasesongs", 12),
    "aeAI": ("com.apple.itunes.itms-artistid", 5),
    "aeCI": ("com.apple.itunes.itms-composerid", 5),
    "aeEN": ("com.apple.itunes.episode-num-str", 9),
    "aeES": ("com.apple.itunes.episode-sort", 5),
    "aeFP": ("com.apple.itunes.req-fplay", 1),
    "aeGD": ("com.apple.itunes.gapless-enc-dr", 5),
    "aeGE": ("com.apple.itunes.gapless-enc-del", 5),
    "aeGH": ("com.apple.itunes.gapless-heur", 5),
    "aeGI": ("com.apple.itunes.itms-genreid", 5),
    "aeGR": ("com.apple.itunes.gapless-resy", 7),
    "aeGU": ("com.apple.itunes.gapless-dur", 7),
    "aeHV": ("com.apple.itunes.has-video", 1),
    "aeMK": ("com.apple.itunes.mediakind", 1),
    "aeMQ": ("com.apple.itunes.unknown-MQ", 1),
    "aeNN": ("com.apple.itunes.network-name", 9),
    "aeNV": ("com.apple.itunes.norm-volume", 5),
    "aePC": ("com.apple.itunes.is-podcast", 1),
    "aePI": ("com.apple.itunes.itms-playlistid", 5),
    "aePP": ("com.apple.itunes.is-podcast-playlist", 1),
    "aePS": ("com.apple.itunes.special-playlist", 1),
    "aeSF": ("com.apple.itunes.itms-storefrontid", 5),
    "aeSI": ("com.apple.itunes.itms-songid", 5),
    "aeSL": ("com.apple.itunes.unknown-SL", 1),
    "aeSN": ("com.apple.itunes.series-name", 9),
    "aeSP": ("com.apple.itunes.smart-playlist", 1),
    "aeSR": ("com.apple.itunes.unknown-SR", 1),
    "aeSU": ("com.apple.itunes.season-num", 5),
    "aeSV": ("com.apple.itunes.music-sharing-version", 5),
    "aeTr": ("com.apple.itunes.unknown-Tr", 1),
    "agrp": ("daap.songgrouping", 9),
    "aply": ("daap.databaseplaylists", 12),
    "aprm": ("daap.playlistrepeatmode", 1),
    "apro": ("daap.protocolversion", 11),
    "apsm": ("daap.playlistshufflemode", 1),
    "apso": ("daap.playlistsongs", 12),
    "arif": ("daap.resolveinfo", 12),
    "arsv": ("daap.resolve", 12),
    "asaa": ("daap.songalbumartist", 9),
    "asac": ("daap.songartworkcount", 3),
    "asal": ("daap.songalbum", 9),
    "asar": ("daap.songartist", 9),
    "asbk": ("daap.bookmarkable", 1),
    "asbo": ("daap.songbookmark", 5),
    "asbr": ("daap.songbitrate", 3),
    "asbt": ("daap.songbeatsperminute", 3),
    "ascd": ("daap.songcodectype", 5),
    "ascm": ("daap.songcomment", 9),
    "ascn": ("daap.songcontentdescription", 9),
    "asco": ("daap.songcompilation", 1),
    "ascp": ("daap.songcomposer", 9),
    "ascr": ("daap.songcontentrating", 1),
    "ascs": ("daap.songcodecsubtype", 5),
    "asct": ("daap.songcategory", 9),
    "asda": ("daap.songdateadded", 10),
    "asdb": ("daap.songdisabled", 1),
    "asdc": ("daap.songdisccount", 3),
    "asdk": ("daap.songdatakind", 1),
    "asdm": ("daap.songdatemodified", 10),
    "asdn": ("daap.songdiscnumber", 3),
    "asdp": ("daap.songdatepurchased", 10),
    "asdr": ("daap.songdatereleased", 10),
    "asdt": ("daap.songdescription", 9),
    "ased": ("daap.songextradata", 3),
    "aseq": ("daap.songeqpreset", 9),
    "asfm": ("daap.songformat", 9),
    "asgn": ("daap.songgenre", 9),
    "asgp": ("daap.songgapless", 1),
    "asgr": ("daap.supportsgroups", 4),
    "ashp": ("daap.songhasbeenplayed", 1),
    "asky": ("daap.songkeywords", 9),
    "aslc": ("daap.songlongcontentdescription", 9),
    "asrv": ("daap.songrelativevolume", 2),
    "assa": ("daap.sortartist", 9),
    "assc": ("daap.sortcomposer", 9),
    "assl": ("daap.sortalbumartist", 9),
    "assn": ("daap.sortname", 9),
    "assp": ("daap.songstoptime", 5),
    "assr": ("daap.songsamplerate", 5),
    "asss": ("daap.sortseriesname", 9),
    "asst": ("daap.songstarttime", 5),
    "assu": ("daap.sortalbum", 9),
    "assz": ("daap.songsize", 5),
    "astc": ("daap.songtrackcount", 3),
    "astm": ("daap.songtime", 5),
    "astn": ("daap.songtracknumber", 3),
    "asul": ("daap.songdataurl", 9),
    "asur": ("daap.songuserrating", 1),
    "asyr": ("daap.songyear", 3),
    "ated": ("daap.supportsextradata", 3),
    "avdb": ("daap.serverdatabases", 12),
    "mbcl": ("dmap.bag", 12),
    "mccr": ("dmap.contentcodesresponse", 12),
    "mcna": ("dmap.contentcodesname", 9),
    "mcnm": ("dmap.contentcodesnumber", 5),
    "mcon": ("dmap.container", 12),
    "mctc": ("dmap.containercount", 5),
    "mcti": ("dmap.containeritemid", 5),
    "mcty": ("dmap.contentcodestype", 3),
    "mdcl": ("dmap.dictionary", 12),
    "miid": ("dmap.itemid", 5),
    "mikd": ("dmap.itemkind", 1),
    "mimc": ("dmap.itemcount", 5),
    "minm": ("dmap.itemname", 9),
    "mlcl": ("dmap.listing", 12),
    "mlid": ("dmap.sessionid", 5),
    "mlit": ("dmap.listingitem", 12),
    "mlog": ("dmap.loginresponse", 12),
    "mpco": ("dmap.parentcontainerid", 5),
    "mper": ("dmap.persistentid", 7),
    "mpro": ("dmap.protocolversion", 11),
    "mrco": ("dmap.returnedcount", 5),
    "msal": ("dmap.supportsautologout", 1),
    "msas": ("dmap.authenticationschemes", 5),
    "msau": ("dmap.authenticationmethod", 1),
    "msbr": ("dmap.supportsbrowse", 1),
    "msdc": ("dmap.databasescount", 5),
    "msed": ("dmap.supportsedit", 1),
    "msex": ("dmap.supportsextensions", 1),
    "msix": ("dmap.supportsindex", 1),
    "mslr": ("dmap.loginrequired", 1),
    "mspi": ("dmap.supportspersistentids", 1),
    "msqy": ("dmap.supportsquery", 1),
    "msrs": ("dmap.supportsresolve", 1),
    "msrv": ("dmap.serverinforesponse", 12),
    "mstc": ("dmap.utctime", 10),
    "mstm": ("dmap.timeoutinterval", 5),
    "msto": ("dmap.utcoffset", 6),
    "msts": ("dmap.statusstring", 9),
    "mstt": ("dmap.status", 5),
    "msup": ("dmap.supportsupdate", 1),
    "mtco": ("dmap.specifiedtotalcount", 5),
    "mudl": ("dmap.deletedidlisting", 12),
    "mupd": ("dmap.updateresponse", 12),
    "musr": ("dmap.serverrevision", 5),
    "muty": ("dmap.updatetype", 1),
    "ppro": ("dpap.protocolversion", 5),
    "pret": ("dpap.unknown", 12),
}
# Numeric DMAP type id -> short format code used when (de)serializing
# values of that type.
dmap_data_types = {
    1: "b", # byte
    2: "ub", # unsigned byte
    3: "h", # short
    4: "uh", # unsigned short
    5: "i", # integer
    6: "ui", # unsigned integer
    7: "l", # long
    8: "ul", # unsigned long
    9: "s", # string
    10: "t", # timestamp
    11: "v", # version
    12: "c", # container
}
# Reverse table: fully-qualified name -> 4-character content code.
dmap_names = {
    name: code for code, (name, _type_id) in dmap_code_types.items()
}
# Reverse table: format code -> numeric DMAP type id.
dmap_reverse_data_types = {
    format_code: type_id for type_id, format_code in dmap_data_types.items()
}
| {
"repo_name": "basilfx/flask-daapserver",
"path": "daapserver/daap_data.py",
"copies": "2",
"size": "6648",
"license": "mit",
"hash": 4593596000581459500,
"line_mean": 37.4277456647,
"line_max": 63,
"alpha_frac": 0.573856799,
"autogenerated": false,
"ratio": 2.6132075471698113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4187064346169811,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'downsample_tile',
'fill_micropolygon_mesh',
'fill_bounds_buffer',
'generate_numpy_begin',
'generate_numpy_span',
'Rectangle',
'print_coordinates',
'print_vertices',
]
import os
import ctypes
import functools
import sys
from pathlib import Path
memoize = functools.lru_cache()

@memoize
def build_capi_lib():
    """Compile (at most once per process, via memoization) and load the
    handsome C extension."""
    from phillip.build import build_so, generate_extension_args, load_library
    here = Path(__file__).parent
    sources = list(map(str, [
        here / '_capi.cpp',
        here / 'cpp_src' / 'RationalBilinearInverter.cpp',
    ]))
    extension_args = generate_extension_args(DLL_FUNCS)
    if sys.platform in ('linux', 'linux2', 'darwin'):
        extension_args['extra_compile_args'] = [ '-std=c++11' ]
    elif sys.platform == 'win32':
        # BUG FIX: was ``sys.platform in ('win32')`` -- parentheses without
        # a comma make that a *substring* test against the string 'win32',
        # not tuple membership.  It matched only by accident; use equality.
        extension_args['extra_compile_args'] = [ '/std:c++14' ]
    extension_args['include_dirs'] = [ str(here / 'cpp_src') ]
    so_path = build_so(
        'handsome_capi', str(here),
        sources, extension_args
    )
    return load_library(so_path)
class c_void_p(ctypes.c_void_p):
    """Local subclass so pointer values returned from these helpers stay
    wrapped objects (ctypes auto-converts plain ``c_void_p`` results to
    bare integers/None -- subclasses are exempt)."""
    pass

NULL_PTR = ctypes.c_void_p(0)

def generate_numpy_begin(array):
    """Return a pointer to the first byte of ``array``'s data buffer."""
    return c_void_p(array.ctypes.data)

def generate_numpy_span(array):
    """Return ``(begin, end)`` pointers bracketing ``array``'s buffer."""
    from functools import reduce
    from operator import mul
    start = generate_numpy_begin(array)
    total_items = reduce(mul, array.shape)
    stop = c_void_p(start.value + total_items * array.itemsize)
    return start, stop
class Rectangle(ctypes.Structure):
    """Axis-aligned rectangle of C floats, layout-compatible with the
    C side of the extension."""

    _fields_ = [
        ('left'  , ctypes.c_float),
        ('bottom', ctypes.c_float),
        ('right' , ctypes.c_float),
        ('top'   , ctypes.c_float),
    ]

    def _describe(self):
        # Shared by __unicode__ and __repr__ to avoid duplicated format
        # strings.
        return 'Rectangle(left={}, bottom={}, right={}, top={})'.format(
            self.left, self.bottom, self.right, self.top
        )

    def __unicode__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
# Names of the functions exported by the C extension; used both to
# generate the extension arguments and to publish them at module scope
# in update_globals().
DLL_FUNCS = [
    'downsample_tile',
    'fill_micropolygon_mesh',
    'fill_bounds_buffer',
    'print_coordinates',
    'print_vertices',
]
def update_globals():
    """Load the C extension and publish its functions at module scope."""
    lib = build_capi_lib()
    exported = {}
    for name in DLL_FUNCS:
        exported[name] = lib[name]
    # Only fill_bounds_buffer returns a structured value.
    exported['fill_bounds_buffer'].restype = Rectangle
    globals().update(exported)

update_globals()
| {
"repo_name": "bracket/rasterizer",
"path": "handsome/capi.py",
"copies": "2",
"size": "2421",
"license": "bsd-2-clause",
"hash": -5743713711062450000,
"line_mean": 21.6261682243,
"line_max": 77,
"alpha_frac": 0.6055349029,
"autogenerated": false,
"ratio": 3.386013986013986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9967882863726637,
"avg_score": 0.004733205037469714,
"num_lines": 107
} |
__all__ = [
'DowntimeApi',
'MonitorApi',
'MonitorType',
]
from dogapi.constants import MonitorType
from dogapi.exceptions import ApiError
class MonitorApi(object):
    """Mixin implementing the /monitor endpoints; the host class must
    provide ``http_request``."""

    def monitor(self, mtype, query, name=None, message=None, options=None):
        """
        Create a new monitor of type *mtype* with the given *name* and *query*.
        The *message* will accompany any notifications sent for the alert and
        can contain the same '@' notation as events to alert individual users.
        The *options* argument is a dictionary of settings for the monitor.
        See the Datadog API documentation for a break down of available options.

        >>> dog_http_api.monitor("metric alert", "sum(last_1d):sum:system.net.bytes_rcvd{host:host0} > 100")
        """
        mtype = mtype.lower()
        if mtype not in MonitorType.ALL:
            raise ApiError('Invalid monitor type, expected one of: %s' \
                % ', '.join(MonitorType.ALL))
        body = {
            'type': mtype,
            'query': query,
        }
        # Optional fields are sent only when truthy.
        if name:
            body['name'] = name
        if message:
            body['message'] = message
        if options:
            if not isinstance(options, dict):
                raise ApiError('Invalid type for `options`, expected `dict`.')
            body['options'] = options
        return self.http_request('POST', '/monitor', body,
            response_formatter=lambda x: x['id'],
        )

    def update_monitor(self, monitor_id, query, name=None, message=None,
            options=None):
        """
        Update the monitor identified by *monitor_id* with the given *query*.
        The *message* will accompany any notifications sent for the alert and
        can contain the same '@' notation as events to alert individual users.
        The *options* argument is a dictionary of settings for the monitor.
        See the Datadog API documentation for a break down of available options.

        >>> dog_http_api.update_monitor(1234, "sum(last_1d):sum:system.net.bytes_rcvd{host:host0} > 200")
        """
        body = {
            'query': query
        }
        if name:
            body['name'] = name
        if message:
            body['message'] = message
        if options:
            body['options'] = options
        return self.http_request('PUT', '/monitor/%s' % monitor_id, body,
            response_formatter=lambda x: x['id'],
        )

    def get_monitor(self, monitor_id, group_states=None):
        """
        Get the details for the monitor identified by *monitor_id*.

        *group_states* is optionally a list of statuses chosen from "all", "ok",
        "warn", "alert", "no data". For example, if you want only the failing
        groups then you would set it to ['alert', 'warn']. If no value is given
        then no group states will be returned.

        >>> dog_http_api.get_monitor(1234, group_states=['all'])
        """
        params = {}
        if group_states:
            params['group_states'] = ','.join(group_states)
        return self.http_request('GET', '/monitor/%s' % monitor_id, **params)

    def delete_monitor(self, monitor_id):
        """
        Delete the monitor identified by *monitor_id*.

        >>> dog_http_api.delete_monitor(1234)
        """
        return self.http_request('DELETE', '/monitor/%s' % monitor_id)

    def get_all_monitors(self, group_states=None, tags=None):
        """
        Get the details for all monitors.

        *group_states* is optionally a list of statuses chosen from "all", "ok",
        "warn", "alert", "no data". For example, if you want only the failing
        groups then you would set it to ['alert', 'warn']. If no value is given
        then no group states will be returned.

        *tags* is optionally a list of scope tags that will be used to filter
        the list of monitors returned. If no value is given, then all monitors,
        regardless of scope, will be returned.

        >>> dog_http_api.get_all_monitors(group_states=['alert'], tags=['host:myhost'])
        """
        # NOTE: the ``basestring`` checks below make this method Python 2
        # only; ``basestring`` does not exist on Python 3.
        params = {}
        if group_states:
            if isinstance(group_states, list):
                group_states = ','.join(group_states)
            if not isinstance(group_states, basestring):
                # BUG FIX: this message previously complained about `tags`.
                raise ApiError(
                    'Invalid type for `group_states`, expected `string`.')
            params['group_states'] = group_states
        if tags:
            if isinstance(tags, list):
                tags = ','.join(tags)
            if not isinstance(tags, basestring):
                raise ApiError('Invalid type for `tags`, expected `string`.')
            params['tags'] = tags
        return self.http_request('GET', '/monitor', **params)

    def mute_monitors(self):
        """
        Mute all monitors.

        >>> dog_http_api.mute_monitors()
        """
        return self.http_request('POST', '/monitor/mute_all')

    def unmute_monitors(self):
        """
        Unmute all monitors.

        >>> dog_http_api.unmute_monitors()
        """
        return self.http_request('POST', '/monitor/unmute_all')

    def mute_monitor(self, monitor_id, scope=None, end=None):
        """
        Mute the monitor identified by *monitor_id*. If a *scope* is given your
        mute will just apply to that scope. You can give an *end* argument that
        is a POSIX timestamp of when the mute should stop.

        >>> dog_http_api.mute_monitor(1234, scope='env:staging')
        """
        body = {}
        if scope:
            body['scope'] = scope
        if end:
            body['end'] = end
        return self.http_request('POST', '/monitor/%s/mute' % monitor_id, body)

    def unmute_monitor(self, monitor_id, scope=None):
        """
        Unmute the monitor identified by *monitor_id*. If a *scope* is given
        your unmute will just apply to that scope.

        >>> dog_http_api.unmute_monitors(1234, scope='env:staging')
        """
        body = {}
        if scope:
            body['scope'] = scope
        return self.http_request('POST', '/monitor/%s/unmute' % monitor_id, body)
class DowntimeApi(object):
    """Mixin implementing the /downtime endpoints; the host class must
    provide ``http_request``."""

    def schedule_downtime(self, scope, start=None, end=None, message=None):
        """
        Schedule downtime over *scope* from *start* to *end*, where *start* and
        *end* are POSIX timestamps. If *start* is omitted, the downtime will begin
        immediately. If *end* is omitted, the downtime will continue until cancelled.
        """
        body = {'scope': scope}
        for field, value in (('start', start), ('end', end),
                ('message', message)):
            if value:
                body[field] = value
        return self.http_request(
            'POST', '/downtime', body,
            response_formatter=lambda x: x['id'],
        )

    def update_downtime(self, downtime_id, scope=None, start=None, end=None, message=None):
        """
        Update downtime parameters.
        """
        body = {}
        for field, value in (('scope', scope), ('start', start),
                ('end', end), ('message', message)):
            if value:
                body[field] = value
        return self.http_request(
            'PUT', '/downtime/%s' % downtime_id, body,
            response_formatter=lambda x: x['id'],
        )

    def get_downtime(self, downtime_id):
        """
        Get the downtime identified by *downtime_id*
        """
        return self.http_request('GET', '/downtime/%s' % downtime_id)

    def cancel_downtime(self, downtime_id):
        """
        Cancel the downtime identified by *downtime_id*
        """
        return self.http_request('DELETE', '/downtime/%s' % downtime_id)

    def get_all_downtimes(self, current_only=False):
        """
        List all scheduled downtimes.
        """
        params = {'current_only': True} if current_only else {}
        return self.http_request('GET', '/downtime', **params)
| {
"repo_name": "DataDog/dogapi",
"path": "src/dogapi/http/monitors.py",
"copies": "1",
"size": "8087",
"license": "bsd-3-clause",
"hash": -7777666767508158000,
"line_mean": 33.8577586207,
"line_max": 108,
"alpha_frac": 0.5717818721,
"autogenerated": false,
"ratio": 4.123916369199388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0035082819794562594,
"num_lines": 232
} |
__all__ = [
'Dropbox',
'DropboxTeam',
'create_session',
]
# This should always be 0.0.0 in master. Only update this after tagging
# before release.
__version__ = '0.0.0'
import contextlib
import json
import logging
import random
import time
import requests
import six
from datetime import datetime, timedelta
from . import files, stone_serializers
from .auth import (
AuthError_validator,
RateLimitError_validator,
)
from .common import (
PathRoot,
PathRoot_validator,
PathRootError_validator
)
from .base import DropboxBase
from .base_team import DropboxTeamBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
PathRootError,
InternalServerError,
RateLimitError,
)
from .session import (
API_HOST,
API_CONTENT_HOST,
API_NOTIFICATION_HOST,
HOST_API,
HOST_CONTENT,
HOST_NOTIFY,
pinned_session,
DEFAULT_TIMEOUT
)
# HTTP header selecting the namespace/path root for a request.
PATH_ROOT_HEADER = 'Dropbox-API-Path-Root'
# Status code the API returns for an invalid path root.
HTTP_STATUS_INVALID_PATH_ROOT = 422
# Seconds of slack applied to token expiry -- presumably so tokens are
# refreshed slightly before they actually expire; confirm against the
# refresh logic further down this module.
TOKEN_EXPIRATION_BUFFER = 300
# Headers for acting on behalf of a team admin / team member.
SELECT_ADMIN_HEADER = 'Dropbox-API-Select-Admin'
SELECT_USER_HEADER = 'Dropbox-API-Select-User'
class RouteResult(object):
    """The successful result of a call to a route."""

    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: Serialized-JSON route result, excluding any
            binary payload portion.
        :param requests.models.Response http_resp: Raw HTTP response; kept
            so the binary-body payload can be streamed later.
        """
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        self.obj_result = obj_result
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route."""

    def __init__(self, request_id, obj_result):
        """
        :param str request_id: Identifier that can be shared with Dropbox
            Support to pinpoint the exact failing request.
        :param str obj_result: Route result, excluding any binary payload
            portion.
        """
        self.request_id, self.obj_result = request_id, obj_result
def create_session(max_connections=8, proxies=None):
    """
    Create a session object usable by multiple :class:`Dropbox` and
    :class:`DropboxTeam` instances, letting them share a connection pool
    and proxy parameters.

    :param int max_connections: Maximum connection pool size.
    :param dict proxies: See the `requests module
        <http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
        for more details.
    :rtype: :class:`requests.sessions.Session`. `See the requests module
        <http://docs.python-requests.org/en/latest/user/advanced/#session-objects>`_
        for more details.
    """
    # Pool connections only need to cover the distinct API hostnames.
    shared = pinned_session(pool_maxsize=max_connections)
    if proxies:
        shared.proxies = proxies
    return shared
class _DropboxTransport(object):
    """
    Responsible for implementing the wire protocol for making requests to the
    Dropbox API.
    """

    _API_VERSION = '2'

    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    _ROUTE_STYLE_DOWNLOAD = 'download'

    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    _ROUTE_STYLE_UPLOAD = 'upload'

    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    _ROUTE_STYLE_RPC = 'rpc'

    def __init__(self,
                 oauth2_access_token=None,
                 max_retries_on_error=4,
                 max_retries_on_rate_limit=None,
                 user_agent=None,
                 session=None,
                 headers=None,
                 timeout=DEFAULT_TIMEOUT,
                 oauth2_refresh_token=None,
                 oauth2_access_token_expiration=None,
                 app_key=None,
                 app_secret=None,
                 scope=None,):
        """
        :param str oauth2_access_token: OAuth2 access token for making client
            requests.
        :param int max_retries_on_error: On 5xx errors, the number of times to
            retry.
        :param Optional[int] max_retries_on_rate_limit: On 429 errors, the
            number of times to retry. If `None`, always retries.
        :param str user_agent: The user agent to use when making requests. This
            helps us identify requests coming from your application. We
            recommend you use the format "AppName/Version". If set, we append
            "/OfficialDropboxPythonSDKv2/__version__" to the user_agent,
        :param session: If not provided, a new session (connection pool) is
            created. To share a session across multiple clients, use
            :func:`create_session`.
        :type session: :class:`requests.sessions.Session`
        :param dict headers: Additional headers to add to requests.
        :param Optional[float] timeout: Maximum duration in seconds that
            client will wait for any single packet from the
            server. After the timeout the client will give up on
            connection. If `None`, client will wait forever. Defaults
            to 100 seconds.
        :param str oauth2_refresh_token: OAuth2 refresh token for refreshing access token
        :param datetime oauth2_access_token_expiration: Expiration for oauth2_access_token
        :param str app_key: application key of requesting application; used for token refresh
        :param str app_secret: application secret of requesting application; used for token refresh
            Not required if PKCE was used to authorize the token
        :param list scope: list of scopes to request on refresh. If left blank,
            refresh will request all available scopes for application
        """
        if not (oauth2_access_token or oauth2_refresh_token):
            raise BadInputException('OAuth2 access token or refresh token must be set')
        if headers is not None and not isinstance(headers, dict):
            raise BadInputException('Expected dict, got {}'.format(headers))
        if oauth2_refresh_token and not app_key:
            raise BadInputException("app_key is required to refresh tokens")
        # Check the type before calling len() so that a scope of a non-sized,
        # non-list type (e.g. an int) raises BadInputException rather than an
        # unrelated TypeError from len().
        if scope is not None and (not isinstance(scope, list) or len(scope) == 0):
            raise BadInputException("Scope list must be of type list")

        self._oauth2_access_token = oauth2_access_token
        self._oauth2_refresh_token = oauth2_refresh_token
        self._oauth2_access_token_expiration = oauth2_access_token_expiration
        self._app_key = app_key
        self._app_secret = app_secret
        self._scope = scope

        self._max_retries_on_error = max_retries_on_error
        self._max_retries_on_rate_limit = max_retries_on_rate_limit
        if session:
            if not isinstance(session, requests.sessions.Session):
                raise BadInputException('Expected requests.sessions.Session, got {}'
                                        .format(session))
            self._session = session
        else:
            self._session = create_session()
        self._headers = headers

        base_user_agent = 'OfficialDropboxPythonSDKv2/' + __version__
        if user_agent:
            self._raw_user_agent = user_agent
            self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
        else:
            self._raw_user_agent = None
            self._user_agent = base_user_agent

        self._logger = logging.getLogger('dropbox')

        # Maps the short host aliases used by routes to fully qualified hosts.
        self._host_map = {HOST_API: API_HOST,
                          HOST_CONTENT: API_CONTENT_HOST,
                          HOST_NOTIFY: API_NOTIFICATION_HOST}

        self._timeout = timeout

    def clone(
            self,
            oauth2_access_token=None,
            max_retries_on_error=None,
            max_retries_on_rate_limit=None,
            user_agent=None,
            session=None,
            headers=None,
            timeout=None,
            oauth2_refresh_token=None,
            oauth2_access_token_expiration=None,
            app_key=None,
            app_secret=None,
            scope=None):
        """
        Creates a new copy of the Dropbox client with the same defaults unless modified by
        arguments to clone()
        See constructor for original parameter descriptions.
        :return: New instance of Dropbox client
        :rtype: Dropbox
        """
        # Arguments are passed positionally in the constructor's order; any
        # argument left as None falls back to this instance's current value.
        return self.__class__(
            oauth2_access_token or self._oauth2_access_token,
            max_retries_on_error or self._max_retries_on_error,
            max_retries_on_rate_limit or self._max_retries_on_rate_limit,
            user_agent or self._user_agent,
            session or self._session,
            headers or self._headers,
            timeout or self._timeout,
            oauth2_refresh_token or self._oauth2_refresh_token,
            oauth2_access_token_expiration or self._oauth2_access_token_expiration,
            app_key or self._app_key,
            app_secret or self._app_secret,
            scope or self._scope
        )

    def request(self,
                route,
                namespace,
                request_arg,
                request_binary,
                timeout=None):
        """
        Makes a request to the Dropbox API and in the process validates that
        the route argument and result are the expected data types. The
        request_arg is converted to JSON based on the arg_data_type. Likewise,
        the response is deserialized from JSON and converted to an object based
        on the {result,error}_data_type.
        :param host: The Dropbox API host to connect to.
        :param route: The route to make the request to.
        :type route: :class:`.datatypes.stone_base.Route`
        :param request_arg: Argument for the route that conforms to the
            validator specified by route.arg_type.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :param Optional[float] timeout: Maximum duration in seconds
            that client will wait for any single packet from the
            server. After the timeout the client will give up on
            connection. If `None`, will use default timeout set on
            Dropbox object. Defaults to `None`.
        :return: The route's result.
        """
        self.check_and_refresh_access_token()
        host = route.attrs['host'] or 'api'
        route_name = namespace + '/' + route.name
        if route.version > 1:
            route_name += '_v{}'.format(route.version)
        route_style = route.attrs['style'] or 'rpc'
        serialized_arg = stone_serializers.json_encode(route.arg_type,
                                                       request_arg)
        if (timeout is None and
                route == files.list_folder_longpoll):
            # The client normally sends a timeout value to the
            # longpoll route. The server will respond after
            # <timeout> + random(0, 90) seconds. We increase the
            # socket timeout to the longpoll timeout value plus 90
            # seconds so that we don't cut the server response short
            # due to a shorter socket timeout.
            # NB: This is done here because base.py is auto-generated
            timeout = request_arg.timeout + 90
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary,
                                                  timeout=timeout)
        decoded_obj_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult):
            returned_data_type = route.result_type
            obj = decoded_obj_result
        elif isinstance(res, RouteErrorResult):
            returned_data_type = route.error_type
            obj = decoded_obj_result['error']
            user_message = decoded_obj_result.get('user_message')
            user_message_text = user_message and user_message.get('text')
            user_message_locale = user_message and user_message.get('locale')
        else:
            raise AssertionError('Expected RouteResult or RouteErrorResult, '
                                 'but res is %s' % type(res))

        deserialized_result = stone_serializers.json_compat_obj_decode(
            returned_data_type, obj, strict=False)

        if isinstance(res, RouteErrorResult):
            raise ApiError(res.request_id,
                           deserialized_result,
                           user_message_text,
                           user_message_locale)
        elif route_style == self._ROUTE_STYLE_DOWNLOAD:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result

    def check_and_refresh_access_token(self):
        """
        Checks if access token needs to be refreshed and refreshes if possible
        :return:
        """
        can_refresh = self._oauth2_refresh_token and self._app_key
        # Refresh slightly before actual expiration (TOKEN_EXPIRATION_BUFFER)
        # so that in-flight requests do not race token expiry.
        needs_refresh = self._oauth2_access_token_expiration and \
            (datetime.utcnow() + timedelta(seconds=TOKEN_EXPIRATION_BUFFER)) >= \
            self._oauth2_access_token_expiration
        needs_token = not self._oauth2_access_token
        if (needs_refresh or needs_token) and can_refresh:
            self.refresh_access_token(scope=self._scope)

    def refresh_access_token(self, host=API_HOST, scope=None):
        """
        Refreshes an access token via refresh token if available
        :param host: host to hit token endpoint with
        :param scope: list of permission scopes for access token
        :return:
        """
        # Check the type before calling len() so that a scope of a non-sized,
        # non-list type (e.g. an int) raises BadInputException rather than an
        # unrelated TypeError from len().
        if scope is not None and (not isinstance(scope, list) or len(scope) == 0):
            raise BadInputException("Scope list must be of type list")

        if not (self._oauth2_refresh_token and self._app_key):
            self._logger.warning('Unable to refresh access token without \
                    refresh token and app key')
            return

        self._logger.info('Refreshing access token.')
        url = "https://{}/oauth2/token".format(host)
        body = {'grant_type': 'refresh_token',
                'refresh_token': self._oauth2_refresh_token,
                'client_id': self._app_key,
                }
        # client_secret is optional: not needed when PKCE was used to
        # authorize the token (see constructor docstring).
        if self._app_secret:
            body['client_secret'] = self._app_secret
        if scope:
            scope = " ".join(scope)
            body['scope'] = scope

        timeout = DEFAULT_TIMEOUT
        if self._timeout:
            timeout = self._timeout

        res = self._session.post(url, data=body, timeout=timeout)
        # A revoked/invalid refresh token surfaces as an AuthError so callers
        # handle it the same way as an invalid access token.
        if res.status_code == 400 and res.json()['error'] == 'invalid_grant':
            request_id = res.headers.get('x-dropbox-request-id')
            err = stone_serializers.json_compat_obj_decode(
                AuthError_validator, 'invalid_access_token')
            raise AuthError(request_id, err)
        res.raise_for_status()

        token_content = res.json()
        self._oauth2_access_token = token_content["access_token"]
        self._oauth2_access_token_expiration = datetime.utcnow() + \
            timedelta(seconds=int(token_content["expires_in"]))

    def request_json_object(self,
                            host,
                            route_name,
                            route_style,
                            request_arg,
                            request_binary,
                            timeout=None):
        """
        Makes a request to the Dropbox API, taking a JSON-serializable Python
        object as an argument, and returning one as a response.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :param str request_arg: A JSON-serializable Python object representing
            the argument for the route.
        :param Optional[bytes] request_binary: Bytes representing the binary
            payload. Use None if there is no binary payload.
        :param Optional[float] timeout: Maximum duration in seconds
            that client will wait for any single packet from the
            server. After the timeout the client will give up on
            connection. If `None`, will use default timeout set on
            Dropbox object. Defaults to `None`.
        :return: The route's result as a JSON-serializable Python object.
        """
        serialized_arg = json.dumps(request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary,
                                                  timeout=timeout)
        # This can throw a ValueError if the result is not deserializable,
        # but that would be completely unexpected.
        deserialized_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult) and res.http_resp is not None:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result

    def request_json_string_with_retry(self,
                                       host,
                                       route_name,
                                       route_style,
                                       request_json_arg,
                                       request_binary,
                                       timeout=None):
        """
        See :meth:`request_json_object` for description of parameters.
        :param request_json_arg: A string representing the serialized JSON
            argument to the route.
        """
        attempt = 0
        rate_limit_errors = 0
        # Only refresh an expired access token once per logical request to
        # avoid looping when the refreshed token is also rejected.
        has_refreshed = False
        while True:
            self._logger.info('Request to %s', route_name)
            try:
                return self.request_json_string(host,
                                                route_name,
                                                route_style,
                                                request_json_arg,
                                                request_binary,
                                                timeout=timeout)
            except AuthError as e:
                if e.error and e.error.is_expired_access_token():
                    if has_refreshed:
                        raise
                    else:
                        self._logger.info(
                            'ExpiredCredentials status_code=%s: Refreshing and Retrying',
                            e.status_code)
                        self.refresh_access_token()
                        has_refreshed = True
                else:
                    raise
            except InternalServerError as e:
                attempt += 1
                if attempt <= self._max_retries_on_error:
                    # Use exponential backoff
                    backoff = 2**attempt * random.random()
                    self._logger.info(
                        'HttpError status_code=%s: Retrying in %.1f seconds',
                        e.status_code, backoff)
                    time.sleep(backoff)
                else:
                    raise
            except RateLimitError as e:
                rate_limit_errors += 1
                if (self._max_retries_on_rate_limit is None or
                        self._max_retries_on_rate_limit >= rate_limit_errors):
                    # Set default backoff to 5 seconds.
                    backoff = e.backoff if e.backoff is not None else 5.0
                    self._logger.info(
                        'Ratelimit: Retrying in %.1f seconds.', backoff)
                    time.sleep(backoff)
                else:
                    raise

    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary,
                            timeout=None):
        """
        See :meth:`request_json_string_with_retry` for description of
        parameters.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)

        if not isinstance(request_binary, (six.binary_type, type(None))):
            # Disallow streams and file-like objects even though the underlying
            # requests library supports them. This is to prevent incorrect
            # behavior when a non-rewindable stream is read from, but the
            # request fails and needs to be re-tried at a later time.
            raise TypeError('expected request_binary as binary type, got %s' %
                            type(request_binary))

        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)

        headers = {'User-Agent': self._user_agent}
        # The notification host is the only one that does not take a bearer
        # token.
        if host != HOST_NOTIFY:
            headers['Authorization'] = 'Bearer %s' % self._oauth2_access_token
            if self._headers:
                headers.update(self._headers)

        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False

        if route_style == self._ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self._ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self._ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)

        if timeout is None:
            timeout = self._timeout

        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               timeout=timeout,
                               )

        request_id = r.headers.get('x-dropbox-request-id')
        if r.status_code >= 500:
            raise InternalServerError(request_id, r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(request_id, r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            err = stone_serializers.json_compat_obj_decode(
                AuthError_validator, r.json()['error'])
            raise AuthError(request_id, err)
        elif r.status_code == HTTP_STATUS_INVALID_PATH_ROOT:
            err = stone_serializers.json_compat_obj_decode(
                PathRootError_validator, r.json()['error'])
            raise PathRootError(request_id, err)
        elif r.status_code == 429:
            err = None
            if r.headers.get('content-type') == 'application/json':
                err = stone_serializers.json_compat_obj_decode(
                    RateLimitError_validator, r.json()['error'])
                retry_after = err.retry_after
            else:
                # No JSON body; fall back to the standard Retry-After header.
                retry_after_str = r.headers.get('retry-after')
                if retry_after_str is not None:
                    retry_after = int(retry_after_str)
                else:
                    retry_after = None
            raise RateLimitError(request_id, err, retry_after)
        elif 200 <= r.status_code <= 299:
            if route_style == self._ROUTE_STYLE_DOWNLOAD:
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self._ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code in (403, 404, 409):
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(request_id, raw_resp)
        else:
            raise HttpError(request_id, r.status_code, r.text)

    def _get_route_url(self, hostname, route_name):
        """Returns the URL of the route.
        :param str hostname: Hostname to make the request to.
        :param str route_name: Name of the route.
        :rtype: str
        """
        return 'https://{hostname}/{version}/{route_name}'.format(
            hostname=hostname,
            version=Dropbox._API_VERSION,
            route_name=route_name,
        )

    def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
        """
        Saves the body of an HTTP response to a file.
        :param str download_path: Local path to save data to.
        :param http_resp: The HTTP response whose body will be saved.
        :type http_resp: :class:`requests.models.Response`
        :rtype: None
        """
        with open(download_path, 'wb') as f:
            with contextlib.closing(http_resp):
                for c in http_resp.iter_content(chunksize):
                    f.write(c)

    def with_path_root(self, path_root):
        """
        Creates a clone of the Dropbox instance with the Dropbox-API-Path-Root header
        as the appropriate serialized instance of PathRoot.
        For more information, see
        https://www.dropbox.com/developers/reference/namespace-guide#pathrootmodes
        :param PathRoot path_root: instance of PathRoot to serialize into the headers field
        :return: A :class: `Dropbox`
        :rtype: Dropbox
        """
        if not isinstance(path_root, PathRoot):
            raise ValueError("path_root must be an instance of PathRoot")

        # Copy so the new client's headers are independent of this client's.
        new_headers = self._headers.copy() if self._headers else {}
        new_headers[PATH_ROOT_HEADER] = stone_serializers.json_encode(PathRoot_validator, path_root)
        return self.clone(
            headers=new_headers
        )

    def close(self):
        """
        Cleans up all resources like the request session/network connection.
        """
        self._session.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
class Dropbox(_DropboxTransport, DropboxBase):
    """
    Use this class to make requests to the Dropbox API using a user's access
    token. Methods of this class are meant to act on the corresponding user's
    Dropbox.
    """
    # All route methods are inherited from the generated DropboxBase; the
    # request plumbing comes from _DropboxTransport.
    pass
class DropboxTeam(_DropboxTransport, DropboxTeamBase):
    """
    Use this class to make requests to the Dropbox API using a team's access
    token. Methods of this class are meant to act on the team, but there is
    also an :meth:`as_user` method for assuming a team member's identity.
    """

    def as_admin(self, team_member_id):
        """
        Allows a team credential to assume the identity of an administrator on the team
        and perform operations on any team-owned content.
        :param str team_member_id: team member id of administrator to perform actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of this admin of the team.
        :rtype: Dropbox
        """
        return self._get_dropbox_client_with_select_header(
            SELECT_ADMIN_HEADER, team_member_id)

    def as_user(self, team_member_id):
        """
        Allows a team credential to assume the identity of a member of the
        team.
        :param str team_member_id: team member id of team member to perform actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of this member of the team.
        :rtype: Dropbox
        """
        return self._get_dropbox_client_with_select_header(
            SELECT_USER_HEADER, team_member_id)

    def _get_dropbox_client_with_select_header(self, select_header_name, team_member_id):
        """
        Get Dropbox client with modified headers
        :param str select_header_name: Header name used to select users
        :param str team_member_id: team member id of team member to perform actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of a member or admin of the team
        :rtype: Dropbox
        """
        # Copy the headers so the derived client does not mutate this one's.
        headers = dict(self._headers) if self._headers else {}
        headers[select_header_name] = team_member_id
        return Dropbox(
            oauth2_access_token=self._oauth2_access_token,
            oauth2_refresh_token=self._oauth2_refresh_token,
            oauth2_access_token_expiration=self._oauth2_access_token_expiration,
            app_key=self._app_key,
            app_secret=self._app_secret,
            scope=self._scope,
            max_retries_on_error=self._max_retries_on_error,
            max_retries_on_rate_limit=self._max_retries_on_rate_limit,
            timeout=self._timeout,
            user_agent=self._raw_user_agent,
            session=self._session,
            headers=headers,
        )
class BadInputException(Exception):
    """
    Raised when incorrect types or values are passed to the SDK.

    This should only ever be raised during development and testing; an
    application should validate its input before it reaches this point.
    """
    pass
| {
"repo_name": "dropbox/dropbox-sdk-python",
"path": "dropbox/dropbox.py",
"copies": "1",
"size": "31005",
"license": "mit",
"hash": -1470882700649511700,
"line_mean": 40.2300531915,
"line_max": 100,
"alpha_frac": 0.5724238026,
"autogenerated": false,
"ratio": 4.492827126503405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005623876446664355,
"num_lines": 752
} |
# Public API of this module.
__all__ = [
    'Dropbox',
    'DropboxTeam',
    'create_session',
]

# This should always be 0.0.0 in master. Only update this after tagging
# before release.
# NOTE(review): value below disagrees with the comment above — presumably
# this file is a tagged release snapshot; verify before changing.
__version__ = '9.4.0'
import contextlib
import json
import logging
import random
import time
import requests
import six
from . import files, stone_serializers
from .auth import (
AuthError_validator,
RateLimitError_validator,
)
from .common import (
PathRoot,
PathRoot_validator,
PathRootError_validator
)
from .base import DropboxBase
from .base_team import DropboxTeamBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
PathRootError,
InternalServerError,
RateLimitError,
)
from .session import (
API_HOST,
API_CONTENT_HOST,
API_NOTIFICATION_HOST,
HOST_API,
HOST_CONTENT,
HOST_NOTIFY,
pinned_session,
)
# Header that selects which namespace (path root) relative paths resolve in.
PATH_ROOT_HEADER = 'Dropbox-API-Path-Root'
# Dropbox responds with 422 when the supplied path root is invalid.
HTTP_STATUS_INVALID_PATH_ROOT = 422
class RouteResult(object):
    """The successful result of a call to a route."""

    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists. Must be serialized JSON.
        :param requests.models.Response http_resp: A raw HTTP response. It will
            be used to stream the binary-body payload of the response.
        """
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        # Serialized JSON result of the route (minus any binary payload).
        self.obj_result = obj_result
        # Raw response for streaming a binary payload; None for RPC routes.
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route."""

    def __init__(self, request_id, obj_result):
        """
        :param str request_id: A request_id can be shared with Dropbox Support
            to pinpoint the exact request that returns an error.
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists.
        """
        # Identifier for correlating this failure with server-side logs.
        self.request_id = request_id
        # Serialized error result of the route.
        self.obj_result = obj_result
def create_session(max_connections=8, proxies=None):
    """
    Creates a session object that can be used by multiple :class:`Dropbox` and
    :class:`DropboxTeam` instances. This lets you share a connection pool
    amongst them, as well as proxy parameters.
    :param int max_connections: Maximum connection pool size.
    :param dict proxies: See the `requests module
        <http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
        for more details.
    :rtype: :class:`requests.sessions.Session`. `See the requests module
        <http://docs.python-requests.org/en/latest/user/advanced/#session-objects>`_
        for more details.
    """
    # We only need as many pool_connections as we have unique hostnames.
    session = pinned_session(pool_maxsize=max_connections)
    # An empty/None proxies mapping leaves the session's default proxies.
    if proxies:
        session.proxies = proxies
    return session
class _DropboxTransport(object):
"""
Responsible for implementing the wire protocol for making requests to the
Dropbox API.
"""
_API_VERSION = '2'
# Download style means that the route argument goes in a Dropbox-API-Arg
# header, and the result comes back in a Dropbox-API-Result header. The
# HTTP response body contains a binary payload.
_ROUTE_STYLE_DOWNLOAD = 'download'
# Upload style means that the route argument goes in a Dropbox-API-Arg
# header. The HTTP request body contains a binary payload. The result
# comes back in a Dropbox-API-Result header.
_ROUTE_STYLE_UPLOAD = 'upload'
# RPC style means that the argument and result of a route are contained in
# the HTTP body.
_ROUTE_STYLE_RPC = 'rpc'
# This is the default longest time we'll block on receiving data from the server
_DEFAULT_TIMEOUT = 30
def __init__(self,
oauth2_access_token,
max_retries_on_error=4,
max_retries_on_rate_limit=None,
user_agent=None,
session=None,
headers=None,
timeout=_DEFAULT_TIMEOUT):
"""
:param str oauth2_access_token: OAuth2 access token for making client
requests.
:param int max_retries_on_error: On 5xx errors, the number of times to
retry.
:param Optional[int] max_retries_on_rate_limit: On 429 errors, the
number of times to retry. If `None`, always retries.
:param str user_agent: The user agent to use when making requests. This
helps us identify requests coming from your application. We
recommend you use the format "AppName/Version". If set, we append
"/OfficialDropboxPythonSDKv2/__version__" to the user_agent,
:param session: If not provided, a new session (connection pool) is
created. To share a session across multiple clients, use
:func:`create_session`.
:type session: :class:`requests.sessions.Session`
:param dict headers: Additional headers to add to requests.
:param Optional[float] timeout: Maximum duration in seconds that
client will wait for any single packet from the
server. After the timeout the client will give up on
connection. If `None`, client will wait forever. Defaults
to 30 seconds.
"""
assert len(oauth2_access_token) > 0, \
'OAuth2 access token cannot be empty.'
assert headers is None or isinstance(headers, dict), \
'Expected dict, got %r' % headers
self._oauth2_access_token = oauth2_access_token
self._max_retries_on_error = max_retries_on_error
self._max_retries_on_rate_limit = max_retries_on_rate_limit
if session:
assert isinstance(session, requests.sessions.Session), \
'Expected requests.sessions.Session, got %r' % session
self._session = session
else:
self._session = create_session()
self._headers = headers
base_user_agent = 'OfficialDropboxPythonSDKv2/' + __version__
if user_agent:
self._raw_user_agent = user_agent
self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
else:
self._raw_user_agent = None
self._user_agent = base_user_agent
self._logger = logging.getLogger('dropbox')
self._host_map = {HOST_API: API_HOST,
HOST_CONTENT: API_CONTENT_HOST,
HOST_NOTIFY: API_NOTIFICATION_HOST}
self._timeout = timeout
def clone(
self,
oauth2_access_token=None,
max_retries_on_error=None,
max_retries_on_rate_limit=None,
user_agent=None,
session=None,
headers=None,
timeout=None):
"""
Creates a new copy of the Dropbox client with the same defaults unless modified by
arguments to clone()
See constructor for original parameter descriptions.
:return: New instance of Dropbox clent
:rtype: Dropbox
"""
return self.__class__(
oauth2_access_token or self._oauth2_access_token,
max_retries_on_error or self._max_retries_on_error,
max_retries_on_rate_limit or self._max_retries_on_rate_limit,
user_agent or self._user_agent,
session or self._session,
headers or self._headers,
timeout or self._timeout
)
def request(self,
route,
namespace,
request_arg,
request_binary,
timeout=None):
"""
Makes a request to the Dropbox API and in the process validates that
the route argument and result are the expected data types. The
request_arg is converted to JSON based on the arg_data_type. Likewise,
the response is deserialized from JSON and converted to an object based
on the {result,error}_data_type.
:param host: The Dropbox API host to connect to.
:param route: The route to make the request to.
:type route: :class:`.datatypes.stone_base.Route`
:param request_arg: Argument for the route that conforms to the
validator specified by route.arg_type.
:param request_binary: String or file pointer representing the binary
payload. Use None if there is no binary payload.
:param Optional[float] timeout: Maximum duration in seconds
that client will wait for any single packet from the
server. After the timeout the client will give up on
connection. If `None`, will use default timeout set on
Dropbox object. Defaults to `None`.
:return: The route's result.
"""
host = route.attrs['host'] or 'api'
route_name = namespace + '/' + route.name
if route.version > 1:
route_name += '_v{}'.format(route.version)
route_style = route.attrs['style'] or 'rpc'
serialized_arg = stone_serializers.json_encode(route.arg_type,
request_arg)
if (timeout is None and
route == files.list_folder_longpoll):
# The client normally sends a timeout value to the
# longpoll route. The server will respond after
# <timeout> + random(0, 90) seconds. We increase the
# socket timeout to the longpoll timeout value plus 90
# seconds so that we don't cut the server response short
# due to a shorter socket timeout.
# NB: This is done here because base.py is auto-generated
timeout = request_arg.timeout + 90
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary,
timeout=timeout)
decoded_obj_result = json.loads(res.obj_result)
if isinstance(res, RouteResult):
returned_data_type = route.result_type
obj = decoded_obj_result
elif isinstance(res, RouteErrorResult):
returned_data_type = route.error_type
obj = decoded_obj_result['error']
user_message = decoded_obj_result.get('user_message')
user_message_text = user_message and user_message.get('text')
user_message_locale = user_message and user_message.get('locale')
else:
raise AssertionError('Expected RouteResult or RouteErrorResult, '
'but res is %s' % type(res))
deserialized_result = stone_serializers.json_compat_obj_decode(
returned_data_type, obj, strict=False)
if isinstance(res, RouteErrorResult):
raise ApiError(res.request_id,
deserialized_result,
user_message_text,
user_message_locale)
elif route_style == self._ROUTE_STYLE_DOWNLOAD:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_object(self,
host,
route_name,
route_style,
request_arg,
request_binary,
timeout=None):
"""
Makes a request to the Dropbox API, taking a JSON-serializable Python
object as an argument, and returning one as a response.
:param host: The Dropbox API host to connect to.
:param route_name: The name of the route to invoke.
:param route_style: The style of the route.
:param str request_arg: A JSON-serializable Python object representing
the argument for the route.
:param Optional[bytes] request_binary: Bytes representing the binary
payload. Use None if there is no binary payload.
:param Optional[float] timeout: Maximum duration in seconds
that client will wait for any single packet from the
server. After the timeout the client will give up on
connection. If `None`, will use default timeout set on
Dropbox object. Defaults to `None`.
:return: The route's result as a JSON-serializable Python object.
"""
serialized_arg = json.dumps(request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary,
timeout=timeout)
# This can throw a ValueError if the result is not deserializable,
# but that would be completely unexpected.
deserialized_result = json.loads(res.obj_result)
if isinstance(res, RouteResult) and res.http_resp is not None:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_string_with_retry(self,
host,
route_name,
route_style,
request_json_arg,
request_binary,
timeout=None):
"""
See :meth:`request_json_object` for description of parameters.
:param request_json_arg: A string representing the serialized JSON
argument to the route.
"""
attempt = 0
rate_limit_errors = 0
while True:
self._logger.info('Request to %s', route_name)
try:
return self.request_json_string(host,
route_name,
route_style,
request_json_arg,
request_binary,
timeout=timeout)
except InternalServerError as e:
attempt += 1
if attempt <= self._max_retries_on_error:
# Use exponential backoff
backoff = 2**attempt * random.random()
self._logger.info(
'HttpError status_code=%s: Retrying in %.1f seconds',
e.status_code, backoff)
time.sleep(backoff)
else:
raise
except RateLimitError as e:
rate_limit_errors += 1
if (self._max_retries_on_rate_limit is None or
self._max_retries_on_rate_limit >= rate_limit_errors):
# Set default backoff to 5 seconds.
backoff = e.backoff if e.backoff is not None else 5.0
self._logger.info(
'Ratelimit: Retrying in %.1f seconds.', backoff)
time.sleep(backoff)
else:
raise
    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary,
                            timeout=None):
        """
        Perform a single HTTP POST for one route call (no retries) and map
        the HTTP status code onto the SDK's exceptions and result wrappers.
        See :meth:`request_json_string_with_retry` for description of
        parameters.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)
        if not isinstance(request_binary, (six.binary_type, type(None))):
            # Disallow streams and file-like objects even though the underlying
            # requests library supports them. This is to prevent incorrect
            # behavior when a non-rewindable stream is read from, but the
            # request fails and needs to be re-tried at a later time.
            raise TypeError('expected request_binary as binary type, got %s' %
                            type(request_binary))
        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)
        headers = {'User-Agent': self._user_agent}
        # The notify host takes unauthenticated requests; every other host
        # gets the bearer token.
        if host != HOST_NOTIFY:
            headers['Authorization'] = 'Bearer %s' % self._oauth2_access_token
        if self._headers:
            headers.update(self._headers)
        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False
        if route_style == self._ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self._ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self._ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)
        # A per-call timeout overrides the client-wide default.
        if timeout is None:
            timeout = self._timeout
        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               timeout=timeout,
                               )
        # Echoed back in every raised exception to aid support/debugging.
        request_id = r.headers.get('x-dropbox-request-id')
        # Translate the HTTP status code into SDK exceptions/result wrappers.
        if r.status_code >= 500:
            raise InternalServerError(request_id, r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(request_id, r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            err = stone_serializers.json_compat_obj_decode(
                AuthError_validator, r.json()['error'])
            raise AuthError(request_id, err)
        elif r.status_code == HTTP_STATUS_INVALID_PATH_ROOT:
            err = stone_serializers.json_compat_obj_decode(
                PathRootError_validator, r.json()['error'])
            raise RateLimitError(request_id, err) if False else PathRootError(request_id, err)
        elif r.status_code == 429:
            err = None
            # Prefer the structured JSON error; fall back to the Retry-After
            # header when the body is not JSON.
            if r.headers.get('content-type') == 'application/json':
                err = stone_serializers.json_compat_obj_decode(
                    RateLimitError_validator, r.json()['error'])
                retry_after = err.retry_after
            else:
                retry_after_str = r.headers.get('retry-after')
                if retry_after_str is not None:
                    retry_after = int(retry_after_str)
                else:
                    retry_after = None
            raise RateLimitError(request_id, err, retry_after)
        elif 200 <= r.status_code <= 299:
            if route_style == self._ROUTE_STYLE_DOWNLOAD:
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self._ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code in (403, 404, 409):
            # Route-specific errors: the serialized error body is returned to
            # the caller for decoding rather than raised here.
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(request_id, raw_resp)
        else:
            raise HttpError(request_id, r.status_code, r.text)
def _get_route_url(self, hostname, route_name):
"""Returns the URL of the route.
:param str hostname: Hostname to make the request to.
:param str route_name: Name of the route.
:rtype: str
"""
return 'https://{hostname}/{version}/{route_name}'.format(
hostname=hostname,
version=Dropbox._API_VERSION,
route_name=route_name,
)
def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
"""
Saves the body of an HTTP response to a file.
:param str download_path: Local path to save data to.
:param http_resp: The HTTP response whose body will be saved.
:type http_resp: :class:`requests.models.Response`
:rtype: None
"""
with open(download_path, 'wb') as f:
with contextlib.closing(http_resp):
for c in http_resp.iter_content(chunksize):
f.write(c)
def with_path_root(self, path_root):
"""
Creates a clone of the Dropbox instance with the Dropbox-API-Path-Root header
as the appropriate serialized instance of PathRoot.
For more information, see
https://www.dropbox.com/developers/reference/namespace-guide#pathrootmodes
:param PathRoot path_root: instance of PathRoot to serialize into the headers field
:return: A :class: `Dropbox`
:rtype: Dropbox
"""
if not isinstance(path_root, PathRoot):
raise ValueError("path_root must be an instance of PathRoot")
return self.clone(
headers={
PATH_ROOT_HEADER: stone_serializers.json_encode(PathRoot_validator, path_root)
}
)
class Dropbox(_DropboxTransport, DropboxBase):
    """
    User-scoped Dropbox API client.
    Combines the HTTP transport (:class:`_DropboxTransport`) with the
    generated user routes (:class:`DropboxBase`); methods act on the
    Dropbox of the user who owns the access token.
    """
class DropboxTeam(_DropboxTransport, DropboxTeamBase):
    """
    Team-scoped Dropbox API client.
    Use this class to make requests to the Dropbox API using a team's access
    token. Methods act on the team itself; :meth:`as_user` and
    :meth:`as_admin` assume the identity of an individual member or
    administrator of the team.
    """
    def as_admin(self, team_member_id):
        """
        Allows a team credential to assume the identity of an administrator on
        the team and perform operations on any team-owned content.
        :param str team_member_id: team member id of administrator to perform
            actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of this admin of the team.
        :rtype: Dropbox
        """
        return self._get_dropbox_client_with_select_header(
            'Dropbox-API-Select-Admin', team_member_id)
    def as_user(self, team_member_id):
        """
        Allows a team credential to assume the identity of a member of the
        team.
        :param str team_member_id: team member id of team member to perform
            actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of this member of the team.
        :rtype: Dropbox
        """
        return self._get_dropbox_client_with_select_header(
            'Dropbox-API-Select-User', team_member_id)
    def _get_dropbox_client_with_select_header(self, select_header_name,
                                               team_member_id):
        """
        Build a user-scoped :class:`Dropbox` client that carries the given
        member-selection header on every request.
        :param str select_header_name: Header name used to select users
        :param str team_member_id: team member id of team member to perform
            actions with
        :return: A :class:`Dropbox` object that can be used to query on behalf
            of a member or admin of the team
        :rtype: Dropbox
        """
        select_headers = dict(self._headers) if self._headers else {}
        select_headers[select_header_name] = team_member_id
        return Dropbox(
            self._oauth2_access_token,
            max_retries_on_error=self._max_retries_on_error,
            max_retries_on_rate_limit=self._max_retries_on_rate_limit,
            timeout=self._timeout,
            user_agent=self._raw_user_agent,
            session=self._session,
            headers=select_headers,
        )
| {
"repo_name": "micahg/script.module.dropbox",
"path": "lib/dropbox/dropbox.py",
"copies": "1",
"size": "25197",
"license": "mit",
"hash": 2849959819126757000,
"line_mean": 40.3743842365,
"line_max": 94,
"alpha_frac": 0.5664563242,
"autogenerated": false,
"ratio": 4.532649757150566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00047906689503492074,
"num_lines": 609
} |
# Only the client class is part of this module's public API.
__all__ = [
    'Dropbox',
]
# TODO(kelkabany): We need to auto populate this as done in the v1 SDK.
# Embedded in the User-Agent header sent with every request.
__version__ = '3.23'
import json
import logging
import os
import random
import six
import time
import requests
from . import babel_serializers
from .base import DropboxBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
InternalServerError,
RateLimitError,
)
from .session import pinned_session
class RouteResult(object):
    """The successful result of a call to a route."""
    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists. Must be serialized JSON.
        :param requests.models.Response http_resp: A raw HTTP response. It will
            be used to stream the binary-body payload of the response.
        """
        # Guard against passing an already-deserialized object; downstream
        # code expects serialized JSON text here.
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        # Serialized JSON result of the route.
        self.obj_result = obj_result
        # Raw response, set only for download-style routes that stream a
        # binary body.
        self.http_resp = http_resp
class RouteErrorResult(object):
    """Wraps the serialized error returned by a failed route call."""
    def __init__(self, obj_result):
        """
        :param str obj_result: Serialized route error, excluding any binary
            payload portion, if one exists.
        """
        self.obj_result = obj_result
class Dropbox(DropboxBase):
    """
    Use this to make requests to the Dropbox API.
    """
    # Version path segment used by _get_route_url when building URLs.
    API_VERSION = '2-beta-2'
    DEFAULT_DOMAIN = 'dropbox.com'
    # host for web routes (used for oauth2)
    HOST_WEB = 'www'
    # Host for RPC-style routes.
    HOST_API = 'api'
    # Host for upload and download-style routes.
    HOST_CONTENT = 'content'
    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    ROUTE_STYLE_DOWNLOAD = 'download'
    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    ROUTE_STYLE_UPLOAD = 'upload'
    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    ROUTE_STYLE_RPC = 'rpc'
    def __init__(self,
                 oauth2_access_token,
                 max_connections=8,
                 max_retries_on_error=4,
                 user_agent=None):
        """
        :param str oauth2_access_token: OAuth2 access token for making client requests.
        :param int max_connections: Maximum connection pool size.
        :param int max_retries_on_error: On 5xx errors, the number of times to retry.
        :param str user_agent: The user agent to use when making requests. This helps
            us identify requests coming from your application. We recommend you use
            the format "AppName/Version". If set, we append
            "/OfficialDropboxPythonV2SDK/__version__" to the user_agent,
        """
        self._oauth2_access_token = oauth2_access_token
        # We only need as many pool_connections as we have unique hostnames.
        self._session = pinned_session(pool_maxsize=max_connections)
        self._max_retries_on_error = max_retries_on_error
        base_user_agent = 'OfficialDropboxPythonV2SDK/' + __version__
        if user_agent:
            self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
        else:
            self._user_agent = base_user_agent
        self._logger = logging.getLogger('dropbox')
        # Hostnames may be overridden via environment variables.
        self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)
        self._api_hostname = os.environ.get(
            'DROPBOX_API_HOST', 'api.' + self._domain)
        self._api_content_hostname = os.environ.get(
            'DROPBOX_API_CONTENT_HOST', 'api-content.' + self._domain)
        self._host_map = {self.HOST_API: self._api_hostname,
                          self.HOST_CONTENT: self._api_content_hostname}
    def request(self,
                host,
                route_name,
                route_style,
                arg_data_type,
                result_data_type,
                error_data_type,
                request_arg,
                request_binary):
        """
        Makes a request to the Dropbox API and in the process validates that
        the route argument and result are the expected data types. The
        request_arg is converted to JSON based on the arg_data_type. Likewise,
        the response is deserialized from JSON and converted to an object based
        on the {result,error}_data_type.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :type arg_data_type: :class:`.datatypes.babel_validators.DataType`
        :type result_data_type: :class:`.datatypes.babel_validators.DataType`
        :type error_data_type: :class:`.datatypes.babel_validators.DataType`
        :param request_arg: Argument for the route that conforms to the
            validator specified by arg_data_type.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result.
        """
        serialized_arg = babel_serializers.json_encode(arg_data_type,
                                                       request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        decoded_obj_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult):
            returned_data_type = result_data_type
            obj = decoded_obj_result
        elif isinstance(res, RouteErrorResult):
            returned_data_type = error_data_type
            # NOTE(review): a later copy of this module in this file reads
            # decoded_obj_result['error'] here instead of 'reason' -- confirm
            # which key the '2-beta-2' server actually returns.
            obj = decoded_obj_result['reason']
        else:
            raise AssertionError('Expected RouteResult or RouteErrorResult, '
                                 'but res is %s' % type(res))
        deserialized_result = babel_serializers.json_compat_obj_decode(
            returned_data_type, obj, strict=False)
        if isinstance(res, RouteErrorResult):
            raise ApiError(deserialized_result)
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_object(self,
                            host,
                            route_name,
                            route_style,
                            request_arg,
                            request_binary):
        """
        Makes a request to the Dropbox API, taking a JSON-serializable Python
        object as an argument, and returning one as a response.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :param str request_arg: A JSON-serializable Python object representing
            the argument for the route.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result as a JSON-serializable Python object.
        """
        serialized_arg = json.dumps(request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        # This can throw a ValueError if the result is not deserializable,
        # but that would be completely unexpected.
        deserialized_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult) and res.http_resp is not None:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_string_with_retry(self,
                                       host,
                                       route_name,
                                       route_style,
                                       request_json_arg,
                                       request_binary):
        """
        See :meth:`request_json_object` for description of parameters.
        :param request_json_arg: A string representing the serialized JSON
            argument to the route.
        """
        attempt = 0
        while True:
            self._logger.info('Request to %s', route_name)
            try:
                return self.request_json_string(host,
                                                route_name,
                                                route_style,
                                                request_json_arg,
                                                request_binary)
            except (InternalServerError, RateLimitError) as e:
                if isinstance(e, InternalServerError):
                    # Do not count a rate limiting error as an attempt
                    attempt += 1
                if attempt <= self._max_retries_on_error:
                    # Use exponential backoff
                    backoff = 2**attempt * random.random()
                    # NOTE(review): e.status_code is also read when e is a
                    # RateLimitError, which is raised below with no arguments;
                    # confirm RateLimitError defines a status_code attribute.
                    self._logger.info('HttpError status_code=%s: '
                                      'Retrying in %.1f seconds',
                                      e.status_code, backoff)
                    time.sleep(backoff)
                else:
                    raise
    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary):
        """
        Perform a single HTTP POST for one route call (no retries).
        See :meth:`request_json_string_with_retry` for description of
        parameters.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)
        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)
        headers = {'Authorization': 'Bearer %s' % self._oauth2_access_token,
                   'User-Agent': self._user_agent}
        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False
        if route_style == self.ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self.ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)
        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               )
        # Translate the HTTP status code into SDK exceptions/result wrappers.
        if r.status_code >= 500:
            raise InternalServerError(r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            raise AuthError(r.json())
        elif r.status_code == 429:
            # TODO(kelkabany): Use backoff if provided in response.
            raise RateLimitError()
        elif 200 <= r.status_code <= 299:
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code in (403, 404, 409):
            # Route-specific errors: the serialized error body is returned to
            # the caller for decoding rather than raised here.
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(raw_resp)
        else:
            raise HttpError(r.status_code, r.text)
    def _get_route_url(self, hostname, route_name):
        """Returns the URL of the route.
        :param str hostname: Hostname to make the request to.
        :param str route_name: Name of the route.
        :rtype: str
        """
        return 'https://{hostname}/{version}/{route_name}'.format(
            hostname=hostname,
            version=Dropbox.API_VERSION,
            route_name=route_name,
        )
    def save_body_to_file(self, download_path, http_resp, chunksize=2**16):
        """
        Saves the body of an HTTP response to a file.
        :param str download_path: Local path to save data to.
        :param http_resp: The HTTP response whose body will be saved.
        :type http_resp: :class:`requests.models.Response`
        :rtype: None
        """
        with open(download_path, 'wb') as f:
            for c in http_resp.iter_content(chunksize):
                f.write(c)
        # NOTE(review): the response is closed only when iteration succeeds;
        # a later copy of this module wraps it in contextlib.closing instead.
        http_resp.close()
| {
"repo_name": "leonardoo/dropbox-sdk-python",
"path": "dropbox/dropbox.py",
"copies": "1",
"size": "14359",
"license": "mit",
"hash": -2829151590393703400,
"line_mean": 39.4478873239,
"line_max": 87,
"alpha_frac": 0.5508043736,
"autogenerated": false,
"ratio": 4.505491057420772,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5556295431020772,
"avg_score": null,
"num_lines": null
} |
# Only the client class is part of this module's public API.
__all__ = [
    'Dropbox',
]
# TODO(kelkabany): We need to auto populate this as done in the v1 SDK.
# Embedded in the User-Agent header sent with every request.
__version__ = '3.27'
import contextlib
import json
import logging
import os
import random
import six
import time
import requests
from . import babel_serializers
from .base import DropboxBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
InternalServerError,
RateLimitError,
)
from .session import pinned_session
class RouteResult(object):
    """The successful result of a call to a route."""
    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: Serialized JSON result of the route, excluding
            any binary payload portion.
        :param requests.models.Response http_resp: A raw HTTP response, used
            to stream the binary-body payload of the response when present.
        """
        assert isinstance(obj_result, six.string_types), (
            'obj_result: expected string, got %r' % type(obj_result))
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), (
                'http_resp: expected requests.models.Response, got %r' %
                type(http_resp))
        self.obj_result = obj_result
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route."""
    def __init__(self, obj_result):
        """
        :param str obj_result: The route's error result, serialized; any
            binary payload portion is not included.
        """
        self.obj_result = obj_result
class Dropbox(DropboxBase):
    """
    Use this to make requests to the Dropbox API.
    """
    # Version path segment used by _get_route_url when building URLs.
    API_VERSION = '2-beta-2'
    DEFAULT_DOMAIN = 'dropbox.com'
    # host for web routes (used for oauth2)
    HOST_WEB = 'www'
    # Host for RPC-style routes.
    HOST_API = 'api'
    # Host for upload and download-style routes.
    HOST_CONTENT = 'content'
    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    ROUTE_STYLE_DOWNLOAD = 'download'
    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    ROUTE_STYLE_UPLOAD = 'upload'
    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    ROUTE_STYLE_RPC = 'rpc'
    def __init__(self,
                 oauth2_access_token,
                 max_connections=8,
                 max_retries_on_error=4,
                 user_agent=None):
        """
        :param str oauth2_access_token: OAuth2 access token for making client requests.
        :param int max_connections: Maximum connection pool size.
        :param int max_retries_on_error: On 5xx errors, the number of times to retry.
        :param str user_agent: The user agent to use when making requests. This helps
            us identify requests coming from your application. We recommend you use
            the format "AppName/Version". If set, we append
            "/OfficialDropboxPythonV2SDK/__version__" to the user_agent,
        """
        self._oauth2_access_token = oauth2_access_token
        # We only need as many pool_connections as we have unique hostnames.
        self._session = pinned_session(pool_maxsize=max_connections)
        self._max_retries_on_error = max_retries_on_error
        base_user_agent = 'OfficialDropboxPythonV2SDK/' + __version__
        if user_agent:
            self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
        else:
            self._user_agent = base_user_agent
        self._logger = logging.getLogger('dropbox')
        # Hostnames may be overridden via environment variables.
        self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)
        self._api_hostname = os.environ.get(
            'DROPBOX_API_HOST', 'api.' + self._domain)
        self._api_content_hostname = os.environ.get(
            'DROPBOX_API_CONTENT_HOST', 'api-content.' + self._domain)
        self._host_map = {self.HOST_API: self._api_hostname,
                          self.HOST_CONTENT: self._api_content_hostname}
    def request(self,
                host,
                route_name,
                route_style,
                arg_data_type,
                result_data_type,
                error_data_type,
                request_arg,
                request_binary):
        """
        Makes a request to the Dropbox API and in the process validates that
        the route argument and result are the expected data types. The
        request_arg is converted to JSON based on the arg_data_type. Likewise,
        the response is deserialized from JSON and converted to an object based
        on the {result,error}_data_type.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :type arg_data_type: :class:`.datatypes.babel_validators.DataType`
        :type result_data_type: :class:`.datatypes.babel_validators.DataType`
        :type error_data_type: :class:`.datatypes.babel_validators.DataType`
        :param request_arg: Argument for the route that conforms to the
            validator specified by arg_data_type.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result.
        """
        serialized_arg = babel_serializers.json_encode(arg_data_type,
                                                       request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        decoded_obj_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult):
            returned_data_type = result_data_type
            obj = decoded_obj_result
        elif isinstance(res, RouteErrorResult):
            returned_data_type = error_data_type
            # Route errors carry the payload of interest under 'error'.
            obj = decoded_obj_result['error']
        else:
            raise AssertionError('Expected RouteResult or RouteErrorResult, '
                                 'but res is %s' % type(res))
        deserialized_result = babel_serializers.json_compat_obj_decode(
            returned_data_type, obj, strict=False)
        if isinstance(res, RouteErrorResult):
            raise ApiError(deserialized_result)
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_object(self,
                            host,
                            route_name,
                            route_style,
                            request_arg,
                            request_binary):
        """
        Makes a request to the Dropbox API, taking a JSON-serializable Python
        object as an argument, and returning one as a response.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :param str request_arg: A JSON-serializable Python object representing
            the argument for the route.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result as a JSON-serializable Python object.
        """
        serialized_arg = json.dumps(request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        # This can throw a ValueError if the result is not deserializable,
        # but that would be completely unexpected.
        deserialized_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult) and res.http_resp is not None:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_string_with_retry(self,
                                       host,
                                       route_name,
                                       route_style,
                                       request_json_arg,
                                       request_binary):
        """
        See :meth:`request_json_object` for description of parameters.
        :param request_json_arg: A string representing the serialized JSON
            argument to the route.
        """
        attempt = 0
        while True:
            self._logger.info('Request to %s', route_name)
            try:
                return self.request_json_string(host,
                                                route_name,
                                                route_style,
                                                request_json_arg,
                                                request_binary)
            except (InternalServerError, RateLimitError) as e:
                if isinstance(e, InternalServerError):
                    # Do not count a rate limiting error as an attempt
                    attempt += 1
                if attempt <= self._max_retries_on_error:
                    # Use exponential backoff
                    backoff = 2**attempt * random.random()
                    # NOTE(review): e.status_code is also read when e is a
                    # RateLimitError, which is raised below with no arguments;
                    # confirm RateLimitError defines a status_code attribute.
                    self._logger.info('HttpError status_code=%s: '
                                      'Retrying in %.1f seconds',
                                      e.status_code, backoff)
                    time.sleep(backoff)
                else:
                    raise
    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary):
        """
        Perform a single HTTP POST for one route call (no retries).
        See :meth:`request_json_string_with_retry` for description of
        parameters.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)
        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)
        headers = {'Authorization': 'Bearer %s' % self._oauth2_access_token,
                   'User-Agent': self._user_agent}
        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False
        if route_style == self.ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self.ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)
        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               )
        # Translate the HTTP status code into SDK exceptions/result wrappers.
        if r.status_code >= 500:
            raise InternalServerError(r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            raise AuthError(r.json())
        elif r.status_code == 429:
            # TODO(kelkabany): Use backoff if provided in response.
            raise RateLimitError()
        elif 200 <= r.status_code <= 299:
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code in (403, 404, 409):
            # Route-specific errors: the serialized error body is returned to
            # the caller for decoding rather than raised here.
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(raw_resp)
        else:
            raise HttpError(r.status_code, r.text)
    def _get_route_url(self, hostname, route_name):
        """Returns the URL of the route.
        :param str hostname: Hostname to make the request to.
        :param str route_name: Name of the route.
        :rtype: str
        """
        return 'https://{hostname}/{version}/{route_name}'.format(
            hostname=hostname,
            version=Dropbox.API_VERSION,
            route_name=route_name,
        )
    def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
        """
        Saves the body of an HTTP response to a file.
        :param str download_path: Local path to save data to.
        :param http_resp: The HTTP response whose body will be saved.
        :type http_resp: :class:`requests.models.Response`
        :rtype: None
        """
        with open(download_path, 'wb') as f:
            # closing() guarantees the response is released even if
            # iteration raises part-way through.
            with contextlib.closing(http_resp):
                for c in http_resp.iter_content(chunksize):
                    f.write(c)
| {
"repo_name": "smarx/dropbox-sdk-python",
"path": "dropbox/dropbox.py",
"copies": "1",
"size": "14406",
"license": "mit",
"hash": 7043735093690148000,
"line_mean": 39.5802816901,
"line_max": 87,
"alpha_frac": 0.5511592392,
"autogenerated": false,
"ratio": 4.510331872260489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5561491111460488,
"avg_score": null,
"num_lines": null
} |
# Only the client class is part of this module's public API.
__all__ = [
    'Dropbox',
]
# TODO(kelkabany): We need to auto populate this as done in the v1 SDK.
# Embedded in the User-Agent header sent with every request.
__version__ = '3.33'
import contextlib
import json
import logging
import os
import random
import six
import time
import requests
from . import babel_serializers
from .base import DropboxBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
InternalServerError,
RateLimitError,
)
from .session import pinned_session
class RouteResult(object):
    """The successful result of a call to a route."""
    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists. Must be serialized JSON.
        :param requests.models.Response http_resp: A raw HTTP response. It will
            be used to stream the binary-body payload of the response.
        """
        # Guard against passing an already-deserialized object; downstream
        # code expects serialized JSON text here.
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        # Serialized JSON result of the route.
        self.obj_result = obj_result
        # Raw response for streaming a binary payload, when present.
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route."""
    def __init__(self, obj_result):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists.
        """
        # Serialized route error, to be decoded by the caller.
        self.obj_result = obj_result
class Dropbox(DropboxBase):
    """
    Use this to make requests to the Dropbox API.
    """
    # Version path segment used when building route URLs.
    API_VERSION = '2-beta-2'
    DEFAULT_DOMAIN = 'dropbox.com'
    # host for web routes (used for oauth2)
    HOST_WEB = 'www'
    # Host for RPC-style routes.
    HOST_API = 'api'
    # Host for upload and download-style routes.
    HOST_CONTENT = 'content'
    # Host for longpoll routes.
    HOST_NOTIFY = 'notify'
    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    ROUTE_STYLE_DOWNLOAD = 'download'
    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    ROUTE_STYLE_UPLOAD = 'upload'
    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    ROUTE_STYLE_RPC = 'rpc'
def __init__(self,
oauth2_access_token,
max_connections=8,
max_retries_on_error=4,
user_agent=None,
proxies=None):
"""
:param str oauth2_access_token: OAuth2 access token for making client
requests.
:param int max_connections: Maximum connection pool size.
:param int max_retries_on_error: On 5xx errors, the number of times to
retry.
:param str user_agent: The user agent to use when making requests. This
helps us identify requests coming from your application. We
recommend you use the format "AppName/Version". If set, we append
"/OfficialDropboxPythonV2SDK/__version__" to the user_agent,
:param dict proxies: See the `requests module
<http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
for more details.
"""
assert len(oauth2_access_token) > 0, \
'OAuth2 access token cannot be empty.'
self._oauth2_access_token = oauth2_access_token
# We only need as many pool_connections as we have unique hostnames.
self._session = pinned_session(pool_maxsize=max_connections)
if proxies:
self._session.proxies = proxies
self._max_retries_on_error = max_retries_on_error
base_user_agent = 'OfficialDropboxPythonV2SDK/' + __version__
if user_agent:
self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
else:
self._user_agent = base_user_agent
self._logger = logging.getLogger('dropbox')
self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)
self._api_hostname = os.environ.get(
'DROPBOX_API_HOST', 'api.' + self._domain)
self._api_content_hostname = os.environ.get(
'DROPBOX_API_CONTENT_HOST', 'api-content.' + self._domain)
self._api_notify_hostname = os.environ.get(
'DROPBOX_API_NOTIFY_HOST', 'api-notify.' + self._domain)
self._host_map = {self.HOST_API: self._api_hostname,
self.HOST_CONTENT: self._api_content_hostname,
self.HOST_NOTIFY: self._api_notify_hostname}
def request(self,
host,
route_name,
route_style,
arg_data_type,
result_data_type,
error_data_type,
request_arg,
request_binary):
"""
Makes a request to the Dropbox API and in the process validates that
the route argument and result are the expected data types. The
request_arg is converted to JSON based on the arg_data_type. Likewise,
the response is deserialized from JSON and converted to an object based
on the {result,error}_data_type.
:param host: The Dropbox API host to connect to.
:param route_name: The name of the route to invoke.
:param route_style: The style of the route.
:type arg_data_type: :class:`.datatypes.babel_validators.DataType`
:type result_data_type: :class:`.datatypes.babel_validators.DataType`
:type error_data_type: :class:`.datatypes.babel_validators.DataType`
:param request_arg: Argument for the route that conforms to the
validator specified by arg_data_type.
:param request_binary: String or file pointer representing the binary
payload. Use None if there is no binary payload.
:return: The route's result.
"""
serialized_arg = babel_serializers.json_encode(arg_data_type,
request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary)
decoded_obj_result = json.loads(res.obj_result)
if isinstance(res, RouteResult):
returned_data_type = result_data_type
obj = decoded_obj_result
elif isinstance(res, RouteErrorResult):
returned_data_type = error_data_type
obj = decoded_obj_result['error']
user_message = decoded_obj_result.get('user_message')
user_message_text = user_message and user_message.get('text')
user_message_locale = user_message and user_message.get('locale')
else:
raise AssertionError('Expected RouteResult or RouteErrorResult, '
'but res is %s' % type(res))
deserialized_result = babel_serializers.json_compat_obj_decode(
returned_data_type, obj, strict=False)
if isinstance(res, RouteErrorResult):
raise ApiError(deserialized_result,
user_message_text,
user_message_locale)
elif route_style == self.ROUTE_STYLE_DOWNLOAD:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_object(self,
host,
route_name,
route_style,
request_arg,
request_binary):
"""
Makes a request to the Dropbox API, taking a JSON-serializable Python
object as an argument, and returning one as a response.
:param host: The Dropbox API host to connect to.
:param route_name: The name of the route to invoke.
:param route_style: The style of the route.
:param str request_arg: A JSON-serializable Python object representing
the argument for the route.
:param request_binary: String or file pointer representing the binary
payload. Use None if there is no binary payload.
:return: The route's result as a JSON-serializable Python object.
"""
serialized_arg = json.dumps(request_arg)
res = self.request_json_string_with_retry(host,
route_name,
route_style,
serialized_arg,
request_binary)
# This can throw a ValueError if the result is not deserializable,
# but that would be completely unexpected.
deserialized_result = json.loads(res.obj_result)
if isinstance(res, RouteResult) and res.http_resp is not None:
return (deserialized_result, res.http_resp)
else:
return deserialized_result
def request_json_string_with_retry(self,
host,
route_name,
route_style,
request_json_arg,
request_binary):
"""
See :meth:`request_json_object` for description of parameters.
:param request_json_arg: A string representing the serialized JSON
argument to the route.
"""
attempt = 0
while True:
self._logger.info('Request to %s', route_name)
try:
return self.request_json_string(host,
route_name,
route_style,
request_json_arg,
request_binary)
except (InternalServerError, RateLimitError) as e:
if isinstance(e, InternalServerError):
# Do not count a rate limiting error as an attempt
attempt += 1
if attempt <= self._max_retries_on_error:
# Use exponential backoff
backoff = 2**attempt * random.random()
self._logger.info('HttpError status_code=%s: '
'Retrying in %.1f seconds',
e.status_code, backoff)
time.sleep(backoff)
else:
raise
def request_json_string(self,
host,
func_name,
route_style,
request_json_arg,
request_binary):
"""
See :meth:`request_json_string_with_retry` for description of
parameters.
"""
if host not in self._host_map:
raise ValueError('Unknown value for host: %r' % host)
# Fully qualified hostname
fq_hostname = self._host_map[host]
url = self._get_route_url(fq_hostname, func_name)
headers = {'Authorization': 'Bearer %s' % self._oauth2_access_token,
'User-Agent': self._user_agent}
# The contents of the body of the HTTP request
body = None
# Whether the response should be streamed incrementally, or buffered
# entirely. If stream is True, the caller is responsible for closing
# the HTTP response.
stream = False
if route_style == self.ROUTE_STYLE_RPC:
headers['Content-Type'] = 'application/json'
body = request_json_arg
elif route_style == self.ROUTE_STYLE_DOWNLOAD:
headers['Dropbox-API-Arg'] = request_json_arg
stream = True
elif route_style == self.ROUTE_STYLE_UPLOAD:
headers['Content-Type'] = 'application/octet-stream'
headers['Dropbox-API-Arg'] = request_json_arg
body = request_binary
else:
raise ValueError('Unknown operation style: %r' % route_style)
r = self._session.post(url,
headers=headers,
data=body,
stream=stream,
verify=True,
)
if r.status_code >= 500:
raise InternalServerError(r.status_code, r.text)
elif r.status_code == 400:
raise BadInputError(r.text)
elif r.status_code == 401:
assert r.headers.get('content-type') == 'application/json', (
'Expected content-type to be application/json, got %r' %
r.headers.get('content-type'))
raise AuthError(r.json())
elif r.status_code == 429:
# TODO(kelkabany): Use backoff if provided in response.
raise RateLimitError()
elif 200 <= r.status_code <= 299:
if route_style == self.ROUTE_STYLE_DOWNLOAD:
raw_resp = r.headers['dropbox-api-result']
else:
assert r.headers.get('content-type') == 'application/json', (
'Expected content-type to be application/json, got %r' %
r.headers.get('content-type'))
raw_resp = r.content.decode('utf-8')
if route_style == self.ROUTE_STYLE_DOWNLOAD:
return RouteResult(raw_resp, r)
else:
return RouteResult(raw_resp)
elif r.status_code in (403, 404, 409):
raw_resp = r.content.decode('utf-8')
return RouteErrorResult(raw_resp)
else:
raise HttpError(r.status_code, r.text)
def _get_route_url(self, hostname, route_name):
"""Returns the URL of the route.
:param str hostname: Hostname to make the request to.
:param str route_name: Name of the route.
:rtype: str
"""
return 'https://{hostname}/{version}/{route_name}'.format(
hostname=hostname,
version=Dropbox.API_VERSION,
route_name=route_name,
)
def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
"""
Saves the body of an HTTP response to a file.
:param str download_path: Local path to save data to.
:param http_resp: The HTTP response whose body will be saved.
:type http_resp: :class:`requests.models.Response`
:rtype: None
"""
with open(download_path, 'wb') as f:
with contextlib.closing(http_resp):
for c in http_resp.iter_content(chunksize):
f.write(c)
| {
"repo_name": "ewjoachim/dropbox-sdk-python",
"path": "dropbox/dropbox.py",
"copies": "1",
"size": "15353",
"license": "mit",
"hash": 5948928332503049000,
"line_mean": 39.8324468085,
"line_max": 80,
"alpha_frac": 0.5509672377,
"autogenerated": false,
"ratio": 4.498388514503369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003472032965114705,
"num_lines": 376
} |
__all__ = [
'Dropbox',
]
# TODO(kelkabany): We need to auto populate this as done in the v1 SDK.
__version__ = '4.0'
import contextlib
import json
import logging
import os
import random
import six
import time
import requests
from . import babel_serializers
from .base import DropboxBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
InternalServerError,
RateLimitError,
)
from .session import pinned_session
class RouteResult(object):
    """The successful result of a call to a route."""
    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists. Must be serialized JSON.
        :param requests.models.Response http_resp: A raw HTTP response. It will
            be used to stream the binary-body payload of the response.
        """
        # NOTE(review): http_resp is only set for download-style routes; the
        # caller appears responsible for eventually closing it -- confirm.
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        self.obj_result = obj_result
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route.

    Wraps the serialized, non-binary portion of a route's error response.
    """
    def __init__(self, obj_result):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists.
        """
        self.obj_result = obj_result
class Dropbox(DropboxBase):
    """
    Use this to make requests to the Dropbox API.
    """
    API_VERSION = '2'
    # NOTE: the leading dot is intentional -- subdomains below are formed by
    # simple string concatenation ('api' + domain, etc.).
    DEFAULT_DOMAIN = '.dropboxapi.com'
    # Host for RPC-style routes.
    HOST_API = 'api'
    # Host for upload and download-style routes.
    HOST_CONTENT = 'content'
    # Host for longpoll routes.
    HOST_NOTIFY = 'notify'
    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    ROUTE_STYLE_DOWNLOAD = 'download'
    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    ROUTE_STYLE_UPLOAD = 'upload'
    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    ROUTE_STYLE_RPC = 'rpc'
    def __init__(self,
                 oauth2_access_token,
                 max_connections=8,
                 max_retries_on_error=4,
                 user_agent=None,
                 proxies=None):
        """
        :param str oauth2_access_token: OAuth2 access token for making client
            requests.
        :param int max_connections: Maximum connection pool size.
        :param int max_retries_on_error: On 5xx errors, the number of times to
            retry.
        :param str user_agent: The user agent to use when making requests. This
            helps us identify requests coming from your application. We
            recommend you use the format "AppName/Version". If set, we append
            "/OfficialDropboxPythonV2SDK/__version__" to the user_agent,
        :param dict proxies: See the `requests module
            <http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
            for more details.
        """
        assert len(oauth2_access_token) > 0, \
            'OAuth2 access token cannot be empty.'
        self._oauth2_access_token = oauth2_access_token
        # We only need as many pool_connections as we have unique hostnames.
        self._session = pinned_session(pool_maxsize=max_connections)
        if proxies:
            self._session.proxies = proxies
        self._max_retries_on_error = max_retries_on_error
        base_user_agent = 'OfficialDropboxPythonV2SDK/' + __version__
        if user_agent:
            self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
        else:
            self._user_agent = base_user_agent
        self._logger = logging.getLogger('dropbox')
        # Hostnames may be overridden via environment variables, e.g. to
        # target a non-production stack during testing.
        self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)
        self._api_hostname = os.environ.get(
            'DROPBOX_API_HOST', 'api' + self._domain)
        self._api_content_hostname = os.environ.get(
            'DROPBOX_API_CONTENT_HOST', 'content' + self._domain)
        self._api_notify_hostname = os.environ.get(
            'DROPBOX_API_NOTIFY_HOST', 'notify' + self._domain)
        # Maps the short host key (HOST_*) to its fully qualified hostname.
        self._host_map = {self.HOST_API: self._api_hostname,
                          self.HOST_CONTENT: self._api_content_hostname,
                          self.HOST_NOTIFY: self._api_notify_hostname}
    def request(self,
                host,
                route_name,
                route_style,
                arg_data_type,
                result_data_type,
                error_data_type,
                request_arg,
                request_binary):
        """
        Makes a request to the Dropbox API and in the process validates that
        the route argument and result are the expected data types. The
        request_arg is converted to JSON based on the arg_data_type. Likewise,
        the response is deserialized from JSON and converted to an object based
        on the {result,error}_data_type.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :type arg_data_type: :class:`.datatypes.babel_validators.DataType`
        :type result_data_type: :class:`.datatypes.babel_validators.DataType`
        :type error_data_type: :class:`.datatypes.babel_validators.DataType`
        :param request_arg: Argument for the route that conforms to the
            validator specified by arg_data_type.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result.
        :raises ApiError: If the route returned an error result.
        """
        serialized_arg = babel_serializers.json_encode(arg_data_type,
                                                       request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        decoded_obj_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult):
            returned_data_type = result_data_type
            obj = decoded_obj_result
        elif isinstance(res, RouteErrorResult):
            returned_data_type = error_data_type
            obj = decoded_obj_result['error']
            # Optional human-readable message (and its locale) that the
            # server may attach to an error, suitable for showing to users.
            user_message = decoded_obj_result.get('user_message')
            user_message_text = user_message and user_message.get('text')
            user_message_locale = user_message and user_message.get('locale')
        else:
            raise AssertionError('Expected RouteResult or RouteErrorResult, '
                                 'but res is %s' % type(res))
        deserialized_result = babel_serializers.json_compat_obj_decode(
            returned_data_type, obj, strict=False)
        if isinstance(res, RouteErrorResult):
            raise ApiError(deserialized_result,
                           user_message_text,
                           user_message_locale)
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            # Download routes also hand back the raw response so the caller
            # can stream the binary body.
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_object(self,
                            host,
                            route_name,
                            route_style,
                            request_arg,
                            request_binary):
        """
        Makes a request to the Dropbox API, taking a JSON-serializable Python
        object as an argument, and returning one as a response.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :param str request_arg: A JSON-serializable Python object representing
            the argument for the route.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result as a JSON-serializable Python object.
        """
        serialized_arg = json.dumps(request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        # This can throw a ValueError if the result is not deserializable,
        # but that would be completely unexpected.
        deserialized_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult) and res.http_resp is not None:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_string_with_retry(self,
                                       host,
                                       route_name,
                                       route_style,
                                       request_json_arg,
                                       request_binary):
        """
        See :meth:`request_json_object` for description of parameters.
        :param request_json_arg: A string representing the serialized JSON
            argument to the route.
        """
        attempt = 0
        while True:
            self._logger.info('Request to %s', route_name)
            try:
                return self.request_json_string(host,
                                                route_name,
                                                route_style,
                                                request_json_arg,
                                                request_binary)
            except (InternalServerError, RateLimitError) as e:
                if isinstance(e, InternalServerError):
                    # Do not count a rate limiting error as an attempt
                    attempt += 1
                if attempt <= self._max_retries_on_error:
                    # Use exponential backoff
                    backoff = 2**attempt * random.random()
                    # NOTE(review): assumes both exception types expose
                    # .status_code -- confirm RateLimitError defines it.
                    self._logger.info('HttpError status_code=%s: '
                                      'Retrying in %.1f seconds',
                                      e.status_code, backoff)
                    time.sleep(backoff)
                else:
                    raise
    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary):
        """
        See :meth:`request_json_string_with_retry` for description of
        parameters.
        :raises ValueError: If host or route_style is unrecognized.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)
        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)
        headers = {'User-Agent': self._user_agent}
        # Longpoll (notify) requests are sent without an Authorization header.
        if host != self.HOST_NOTIFY:
            headers['Authorization'] = 'Bearer %s' % self._oauth2_access_token
        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False
        if route_style == self.ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self.ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)
        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               )
        if r.status_code >= 500:
            raise InternalServerError(r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            raise AuthError(r.json())
        elif r.status_code == 429:
            # TODO(kelkabany): Use backoff if provided in response.
            raise RateLimitError()
        elif 200 <= r.status_code <= 299:
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                # For downloads the JSON result rides in a response header.
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code in (403, 404, 409):
            # These status codes carry a JSON-serialized route error in the
            # response body.
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(raw_resp)
        else:
            raise HttpError(r.status_code, r.text)
    def _get_route_url(self, hostname, route_name):
        """Returns the URL of the route.
        :param str hostname: Hostname to make the request to.
        :param str route_name: Name of the route.
        :rtype: str
        """
        return 'https://{hostname}/{version}/{route_name}'.format(
            hostname=hostname,
            version=Dropbox.API_VERSION,
            route_name=route_name,
        )
    def _save_body_to_file(self, download_path, http_resp, chunksize=2**16):
        """
        Saves the body of an HTTP response to a file.
        :param str download_path: Local path to save data to.
        :param http_resp: The HTTP response whose body will be saved.
        :type http_resp: :class:`requests.models.Response`
        :rtype: None
        """
        with open(download_path, 'wb') as f:
            # closing() guarantees the streamed response is released even if
            # iteration raises partway through.
            with contextlib.closing(http_resp):
                for c in http_resp.iter_content(chunksize):
                    f.write(c)
| {
"repo_name": "mafrosis/dropbox-python-sdk",
"path": "dropbox/dropbox.py",
"copies": "1",
"size": "15311",
"license": "mit",
"hash": -1393481337625033000,
"line_mean": 39.9385026738,
"line_max": 80,
"alpha_frac": 0.5511070472,
"autogenerated": false,
"ratio": 4.5072122460995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5558319293299501,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Dropbox',
]
# TODO(kelkabany): We need to auto populate this as done in the v1 SDK.
__version__ = '<insert-version-number-here>'
import json
import logging
import os
import pkg_resources
import random
import six
import ssl
import time
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from . import babel_serializers
from .base import DropboxBase
from .exceptions import (
ApiError,
AuthError,
BadInputError,
HttpError,
InternalServerError,
RateLimitError,
)
_TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
class RouteResult(object):
    """The successful result of a call to a route."""
    def __init__(self, obj_result, http_resp=None):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists. Must be serialized JSON.
        :param requests.models.Response http_resp: A raw HTTP response. It will
            be used to stream the binary-body payload of the response.
        """
        # NOTE(review): http_resp is only set for download-style routes; the
        # caller appears responsible for eventually closing it -- confirm.
        assert isinstance(obj_result, six.string_types), \
            'obj_result: expected string, got %r' % type(obj_result)
        if http_resp is not None:
            assert isinstance(http_resp, requests.models.Response), \
                'http_resp: expected requests.models.Response, got %r' % \
                type(http_resp)
        self.obj_result = obj_result
        self.http_resp = http_resp
class RouteErrorResult(object):
    """The error result of a call to a route.

    Wraps the serialized, non-binary portion of a route's error response.
    """
    def __init__(self, obj_result):
        """
        :param str obj_result: The result of a route not including the binary
            payload portion, if one exists.
        """
        self.obj_result = obj_result
# TODO(kelkabany): We probably only want to instantiate this once so that even
# if multiple Dropbox objects are instantiated, they all share the same pool.
class _SSLAdapter(HTTPAdapter):
    """Transport adapter that pins connections to the bundled trusted certs.

    Requires certificate validation against _TRUSTED_CERT_FILE rather than
    the system CA store.
    """
    def init_poolmanager(self, connections, maxsize, block=False):
        # SECURITY NOTE(review): ssl.PROTOCOL_TLSv1 pins connections to
        # TLS 1.0, which is deprecated; modern code should negotiate the
        # highest mutually supported version (e.g. PROTOCOL_TLS_CLIENT).
        # Left unchanged here -- confirm server-side requirements first.
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       cert_reqs=ssl.CERT_REQUIRED,
                                       ca_certs=_TRUSTED_CERT_FILE,
                                       ssl_version=ssl.PROTOCOL_TLSv1)
class Dropbox(DropboxBase):
    """
    Use this to make requests to the Dropbox API.
    """
    # Pre-release API version string used by this SDK snapshot.
    API_VERSION = '2-beta-2'
    DEFAULT_DOMAIN = 'dropbox.com'
    # Host for RPC-style routes.
    HOST_API = 'api'
    # Host for upload and download-style routes.
    HOST_CONTENT = 'content'
    # Download style means that the route argument goes in a Dropbox-API-Arg
    # header, and the result comes back in a Dropbox-API-Result header. The
    # HTTP response body contains a binary payload.
    ROUTE_STYLE_DOWNLOAD = 'download'
    # Upload style means that the route argument goes in a Dropbox-API-Arg
    # header. The HTTP request body contains a binary payload. The result
    # comes back in a Dropbox-API-Result header.
    ROUTE_STYLE_UPLOAD = 'upload'
    # RPC style means that the argument and result of a route are contained in
    # the HTTP body.
    ROUTE_STYLE_RPC = 'rpc'
    def __init__(self,
                 oauth2_access_token,
                 max_connections=8,
                 max_retries_on_error=4,
                 user_agent=None):
        """
        :param str oauth2_access_token: OAuth2 access token for making client requests.
        :param int max_connections: Maximum connection pool size.
        :param int max_retries_on_error: On 5xx errors, the number of times to retry.
        :param str user_agent: The user agent to use when making requests. This helps
            us identify requests coming from your application. We recommend you use
            the format "AppName/Version". If set, we append
            "/OfficialDropboxPythonV2SDK/__version__" to the user_agent,
        """
        self._oauth2_access_token = oauth2_access_token
        # We only need as many pool_connections as we have unique hostnames.
        http_adapter = _SSLAdapter(pool_connections=4,
                                   pool_maxsize=max_connections)
        # Use a single session for connection re-use.
        self._session = requests.session()
        self._session.mount('https://', http_adapter)
        self._max_retries_on_error = max_retries_on_error
        base_user_agent = 'OfficialDropboxPythonV2SDK/' + __version__
        if user_agent:
            self._user_agent = '{}/{}'.format(user_agent, base_user_agent)
        else:
            self._user_agent = base_user_agent
        self._logger = logging.getLogger('dropbox')
        self._domain = os.environ.get('DROPBOX_DOMAIN', Dropbox.DEFAULT_DOMAIN)
        # Hostname overrides for testing. The correctly spelled DROPBOX_*
        # variables take precedence; the misspelled DBOPBOX_* names that this
        # module historically read are kept as a fallback so existing
        # deployments keep working.
        self._api_hostname = os.environ.get(
            'DROPBOX_API_HOST',
            os.environ.get('DBOPBOX_API_HOST', 'api.' + self._domain))
        self._api_content_hostname = os.environ.get(
            'DROPBOX_API_CONTENT_HOST',
            os.environ.get('DBOPBOX_API_CONTENT_HOST',
                           'api-content.' + self._domain))
        # Maps the short host key (HOST_*) to its fully qualified hostname.
        self._host_map = {self.HOST_API: self._api_hostname,
                          self.HOST_CONTENT: self._api_content_hostname}
    def request(self,
                host,
                route_name,
                route_style,
                arg_data_type,
                result_data_type,
                error_data_type,
                request_arg,
                request_binary):
        """
        Makes a request to the Dropbox API and in the process validates that
        the route argument and result are the expected data types. The
        request_arg is converted to JSON based on the arg_data_type. Likewise,
        the response is deserialized from JSON and converted to an object based
        on the {result,error}_data_type.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :type arg_data_type: :class:`.datatypes.babel_validators.DataType`
        :type result_data_type: :class:`.datatypes.babel_validators.DataType`
        :type error_data_type: :class:`.datatypes.babel_validators.DataType`
        :param request_arg: Argument for the route that conforms to the
            validator specified by arg_data_type.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result.
        :raises ApiError: If the route returned an error result.
        """
        serialized_arg = babel_serializers.json_encode(arg_data_type,
                                                       request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        decoded_obj_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult):
            returned_data_type = result_data_type
            obj = decoded_obj_result
        elif isinstance(res, RouteErrorResult):
            returned_data_type = error_data_type
            # The serialized route error lives under the 'reason' key in this
            # API version.
            obj = decoded_obj_result['reason']
        else:
            raise AssertionError('Expected RouteResult or RouteErrorResult, '
                                 'but res is %s' % type(res))
        deserialized_result = babel_serializers.json_compat_obj_decode(
            returned_data_type, obj, strict=False)
        if isinstance(res, RouteErrorResult):
            raise ApiError(deserialized_result)
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            # Download routes also hand back the raw response so the caller
            # can stream the binary body.
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_object(self,
                            host,
                            route_name,
                            route_style,
                            request_arg,
                            request_binary):
        """
        Makes a request to the Dropbox API, taking a JSON-serializable Python
        object as an argument, and returning one as a response.
        :param host: The Dropbox API host to connect to.
        :param route_name: The name of the route to invoke.
        :param route_style: The style of the route.
        :param str request_arg: A JSON-serializable Python object representing
            the argument for the route.
        :param request_binary: String or file pointer representing the binary
            payload. Use None if there is no binary payload.
        :return: The route's result as a JSON-serializable Python object.
        """
        serialized_arg = json.dumps(request_arg)
        res = self.request_json_string_with_retry(host,
                                                  route_name,
                                                  route_style,
                                                  serialized_arg,
                                                  request_binary)
        # This can throw a ValueError if the result is not deserializable,
        # but that would be completely unexpected.
        deserialized_result = json.loads(res.obj_result)
        if isinstance(res, RouteResult) and res.http_resp is not None:
            return (deserialized_result, res.http_resp)
        else:
            return deserialized_result
    def request_json_string_with_retry(self,
                                       host,
                                       route_name,
                                       route_style,
                                       request_json_arg,
                                       request_binary):
        """
        See :meth:`request_json_object` for description of parameters.
        :param request_json_arg: A string representing the serialized JSON
            argument to the route.
        """
        attempt = 0
        while True:
            self._logger.info('Request to %s', route_name)
            try:
                return self.request_json_string(host,
                                                route_name,
                                                route_style,
                                                request_json_arg,
                                                request_binary)
            except (InternalServerError, RateLimitError) as e:
                if isinstance(e, InternalServerError):
                    # Do not count a rate limiting error as an attempt
                    attempt += 1
                if attempt <= self._max_retries_on_error:
                    # Use exponential backoff
                    backoff = 2**attempt * random.random()
                    # NOTE(review): assumes both exception types expose
                    # .status_code -- confirm RateLimitError defines it.
                    self._logger.info('HttpError status_code=%s: '
                                      'Retrying in %.1f seconds',
                                      e.status_code, backoff)
                    time.sleep(backoff)
                else:
                    raise
    def request_json_string(self,
                            host,
                            func_name,
                            route_style,
                            request_json_arg,
                            request_binary):
        """
        See :meth:`request_json_string_with_retry` for description of
        parameters.
        :raises ValueError: If host or route_style is unrecognized.
        """
        if host not in self._host_map:
            raise ValueError('Unknown value for host: %r' % host)
        # Fully qualified hostname
        fq_hostname = self._host_map[host]
        url = self._get_route_url(fq_hostname, func_name)
        headers = {'Authorization': 'Bearer %s' % self._oauth2_access_token,
                   'User-Agent': self._user_agent}
        # The contents of the body of the HTTP request
        body = None
        # Whether the response should be streamed incrementally, or buffered
        # entirely. If stream is True, the caller is responsible for closing
        # the HTTP response.
        stream = False
        if route_style == self.ROUTE_STYLE_RPC:
            headers['Content-Type'] = 'application/json'
            body = request_json_arg
        elif route_style == self.ROUTE_STYLE_DOWNLOAD:
            headers['Dropbox-API-Arg'] = request_json_arg
            stream = True
        elif route_style == self.ROUTE_STYLE_UPLOAD:
            headers['Content-Type'] = 'application/octet-stream'
            headers['Dropbox-API-Arg'] = request_json_arg
            body = request_binary
        else:
            raise ValueError('Unknown operation style: %r' % route_style)
        r = self._session.post(url,
                               headers=headers,
                               data=body,
                               stream=stream,
                               verify=True,
                               )
        if r.status_code >= 500:
            raise InternalServerError(r.status_code, r.text)
        elif r.status_code == 400:
            raise BadInputError(r.text)
        elif r.status_code == 401:
            assert r.headers.get('content-type') == 'application/json', (
                'Expected content-type to be application/json, got %r' %
                r.headers.get('content-type'))
            raise AuthError(r.json())
        elif r.status_code == 429:
            # TODO(kelkabany): Use backoff if provided in response.
            raise RateLimitError()
        elif 200 <= r.status_code <= 299:
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                # For downloads the JSON result rides in a response header.
                raw_resp = r.headers['dropbox-api-result']
            else:
                assert r.headers.get('content-type') == 'application/json', (
                    'Expected content-type to be application/json, got %r' %
                    r.headers.get('content-type'))
                raw_resp = r.content.decode('utf-8')
            if route_style == self.ROUTE_STYLE_DOWNLOAD:
                return RouteResult(raw_resp, r)
            else:
                return RouteResult(raw_resp)
        elif r.status_code == 409:
            # 409 carries a JSON-serialized route error in the response body.
            raw_resp = r.content.decode('utf-8')
            return RouteErrorResult(raw_resp)
        else:
            raise HttpError(r.status_code, r.text)
    def _get_route_url(self, hostname, route_name):
        """Returns the URL of the route.
        :param str hostname: Hostname to make the request to.
        :param str route_name: Name of the route.
        :rtype: str
        """
        return 'https://{hostname}/{version}/{route_name}'.format(
            hostname=hostname,
            version=Dropbox.API_VERSION,
            route_name=route_name,
        )
    def save_body_to_file(self, download_path, http_resp, chunksize=2**16):
        """
        Saves the body of an HTTP response to a file.
        :param str download_path: Local path to save data to.
        :param http_resp: The HTTP response whose body will be saved.
        :type http_resp: :class:`requests.models.Response`
        :rtype: None
        """
        with open(download_path, 'wb') as f:
            # Close the response even if writing fails partway through;
            # previously an exception during iteration leaked the underlying
            # connection because close() was only reached on success.
            try:
                for c in http_resp.iter_content(chunksize):
                    f.write(c)
            finally:
                http_resp.close()
| {
"repo_name": "CoolCloud/dropbox-sdk-python",
"path": "dropbox/dropbox.py",
"copies": "1",
"size": "15325",
"license": "mit",
"hash": -9203606141165584000,
"line_mean": 40.307277628,
"line_max": 87,
"alpha_frac": 0.553278956,
"autogenerated": false,
"ratio": 4.527326440177252,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5580605396177252,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'dump',
'generate_release_metadata',
'get_output_dir_path',
'get_output_label',
]
import dataclasses
import json
import typing
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
@dataclasses.dataclass(frozen=True)
class ReleaseMetadata:
    """Metadata recorded alongside a release.

    Captures, for every source repo that went into the build, the
    origin URL, the checked-out commit, and whether the checkout had
    uncommitted changes.
    """

    @dataclasses.dataclass(frozen=True)
    class Source:
        # Origin remote URL (``git remote --verbose``).
        url: str
        # Commit hash of HEAD (``git log -1 --format=format:%H``).
        revision: str
        # True if ``git status --porcelain`` reports local changes.
        dirty: bool

    sources: typing.List[Source]
def get_output_label(name):
    """Return a foreman label ``//<relpath-minus-root>:<name>``."""
    # Drop the first path component of the current relpath.
    relpath_parts = foreman.get_relpath().parts[1:]
    return foreman.Label.parse('//%s:%s' % ('/'.join(relpath_parts), name))
def get_output_dir_path(parameters, name, version):
    """Get pod or xar dir path under release repo."""
    # <releases root>/<relpath>/<name>/<version>
    root = parameters['//releases:root']
    return root / foreman.get_relpath() / name / version
def generate_release_metadata(parameters, metadata_path):
    """Write release metadata for every source repo to metadata_path."""
    sources = [
        _git_get_source(repo_path)
        for repo_path in parameters['//bases:roots']
    ]
    dump(ReleaseMetadata(sources=sources), metadata_path)
def _git_get_source(source):
    """Collect url/revision/dirty state of one git checkout."""
    # Run all git commands from inside the checkout, capturing stdout.
    with scripts.using_cwd(source), scripts.doing_capture_stdout():
        url = _git_get_url(source)
        revision = _git_get_revision()
        dirty = _git_get_dirty()
    return ReleaseMetadata.Source(url=url, revision=revision, dirty=dirty)
def _git_get_url(source):
    """Return the URL of the ``origin`` remote of the current checkout.

    ``source`` is only used in the error message; the working directory
    is set up by the caller (``_git_get_source``).
    """
    proc = scripts.run(['git', 'remote', '--verbose'])
    for line in proc.stdout.decode('utf-8').split('\n'):
        words = line.split()
        # Guard against blank lines (the output's trailing newline
        # yields an empty final element), which would otherwise make
        # ``words[0]`` raise IndexError before reaching the fallback.
        if words and words[0] == 'origin':
            return words[1]
    return ASSERT.unreachable('expect remote origin: {}', source)
def _git_get_revision():
    """Return the commit hash of HEAD as an ASCII string."""
    proc = scripts.run(['git', 'log', '-1', '--format=format:%H'])
    output = proc.stdout.decode('ascii')
    return output.strip()
def _git_get_dirty():
    """Return True if the checkout has uncommitted changes."""
    proc = scripts.run(['git', 'status', '--porcelain'])
    # A non-empty status line not starting with a space marks a change;
    # empty lines (from the trailing newline) are skipped.
    return any(
        line and not line.startswith(' ')
        for line in proc.stdout.decode('utf-8').split('\n')
    )
def dump(obj, path):
    """Serialize a dataclass instance to pretty-printed JSON at path."""
    as_dict = dataclasses.asdict(obj)
    payload = json.dumps(as_dict, indent=4).encode('ascii')
    scripts.write_bytes(payload, path)
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/rules/releases.py",
"copies": "1",
"size": "2209",
"license": "mit",
"hash": 6663348857964692000,
"line_mean": 22.5,
"line_max": 70,
"alpha_frac": 0.5885015844,
"autogenerated": false,
"ratio": 3.6633499170812605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9750585037448839,
"avg_score": 0.0002532928064842958,
"num_lines": 94
} |
__all__ = [
'DynamicListBuilder',
'DynamicListReader',
'DynamicStructBuilder',
'DynamicStructReader',
]
import enum
import functools
import operator
from g1.bases import classes
from g1.bases import collections
from g1.bases.assertions import ASSERT
from . import _capnp
# pylint: disable=c-extension-no-member
from . import bases
from . import schemas
class Base(bases.Base):
    """Common base of the dynamic reader/builder wrapper classes.

    Holds the raw ``_capnp`` object, its schema, and a strong reference
    to the root message that owns the underlying memory.
    """

    _schema_type = type(None)  # Sub-class must override this.

    def __init__(self, message, schema, raw):
        ASSERT.isinstance(schema, self._schema_type)
        super().__init__(raw)
        # Keep a strong reference to the root message to ensure that it
        # is not garbage-collected before us.
        self._message = message
        self.schema = schema

    __repr__ = classes.make_repr('schema={self.schema} {self!s}')

    def __str__(self):
        # Sub-classes encode their raw value to capnp text format.
        raise NotImplementedError
class DynamicListReader(Base):
    """Provide read-only list-like interface for ``DynamicList``."""

    _schema_type = schemas.ListSchema
    _raw_type = _capnp.DynamicList.Reader

    def __init__(self, *args):
        super().__init__(*args)
        # TODO: For now, we do not share to_upper/to_lower among reader
        # and builder objects, even though they might be derived from
        # the same type, because we do not know how to define hash key
        # from types. (Same below.)
        self.__to_upper = _make_to_upper(self.schema.element_type, True)

    def __str__(self):
        # capnp text format of the whole list.
        return _capnp.TextCodec().encode(
            _capnp.DynamicValue.Reader.fromDynamicList(self._raw)
        )

    def __len__(self):
        return len(self._raw)

    def __getitem__(self, index):
        # Unlike Python lists, negative indices are not supported.
        if not 0 <= index < len(self):
            raise IndexError(index)
        # Convert the raw element to its Python-facing representation.
        return self.__to_upper(self._message, self._raw[index])
class DynamicListBuilder(Base):
    """Provide list-like interface for ``DynamicList``."""

    _schema_type = schemas.ListSchema
    _raw_type = _capnp.DynamicList.Builder

    def __init__(self, *args):
        super().__init__(*args)
        # Element converters for both directions (get and set).
        self.__to_upper = _make_to_upper(self.schema.element_type, False)
        self.__to_lower = _make_to_lower(self.schema.element_type)

    def __str__(self):
        # capnp text format of the whole list.
        return _capnp.TextCodec().encode(
            _capnp.DynamicValue.Reader.fromDynamicList(self._raw.asReader())
        )

    def __len__(self):
        return len(self._raw)

    def __getitem__(self, index):
        # Negative indices are not supported.
        if not 0 <= index < len(self):
            raise IndexError(index)
        return self.__to_upper(self._message, self._raw[index])

    def __setitem__(self, index, value):
        if not 0 <= index < len(self):
            raise IndexError(index)
        # Convert the Python value down to a capnp DynamicValue.
        self._raw.set(index, self.__to_lower(value))

    def init(self, index, size=None):
        """Initialize the list (size given) or struct element at index."""
        if not 0 <= index < len(self):
            raise IndexError(index)
        if self.schema.element_type.is_list():
            ASSERT.greater_or_equal(size, 0)
            return DynamicListBuilder(
                self._message,
                self.schema.element_type.as_list(),
                self._raw.init(index, size).asDynamicList(),
            )
        else:
            # Although Builder::init does not support struct type, to
            # make interface consistent between list-of-struct and
            # struct-of-struct, let's return something here rather than
            # erring out.
            ASSERT.true(self.schema.element_type.is_struct())
            return self[index]
class DynamicStructReader(Base):
    """Provide read-only dict-like interface for ``DynamicStruct``."""

    _schema_type = schemas.StructSchema
    _raw_type = _capnp.DynamicStruct.Reader

    def __init__(self, *args):
        super().__init__(*args)
        # Lazily build and cache one converter per field.
        self.__to_uppers = collections.LoadingDict(
            lambda field: _make_to_upper(field.type, True)
        )

    def __str__(self):
        # capnp text format of the whole struct.
        return _capnp.TextCodec().encode(
            _capnp.DynamicValue.Reader.fromDynamicStruct(self._raw)
        )

    def __contains__(self, name):
        return name in self.schema.fields

    def __iter__(self):
        # Iterate over field names, like a dict.
        return iter(self.schema.fields)

    def __getitem__(self, name):
        field = self.schema.fields[name]
        return _struct_getitem(self, field, self.__to_uppers[field])
class DynamicStructBuilder(Base):
    """Provide dict-like interface for ``DynamicStruct``."""

    _schema_type = schemas.StructSchema
    _raw_type = _capnp.DynamicStruct.Builder

    def __init__(self, *args):
        super().__init__(*args)
        # Lazily build and cache per-field converters, both directions.
        self.__to_uppers = collections.LoadingDict(
            lambda field: _make_to_upper(field.type, False)
        )
        self.__to_lowers = collections.LoadingDict(
            lambda field: _make_to_lower(field.type)
        )

    def __str__(self):
        # capnp text format of the whole struct.
        return _capnp.TextCodec().encode(
            _capnp.DynamicValue.Reader.fromDynamicStruct(self._raw.asReader())
        )

    def from_text(self, text):
        """Populate this struct from capnp text format."""
        _capnp.TextCodec().decode(text, self._raw)

    def as_reader(self):
        """Return a read-only view of this builder."""
        return DynamicStructReader(
            self._message, self.schema, self._raw.asReader()
        )

    def __contains__(self, name):
        return name in self.schema.fields

    def __iter__(self):
        # Iterate over field names, like a dict.
        return iter(self.schema.fields)

    def __getitem__(self, name):
        field = self.schema.fields[name]
        return _struct_getitem(self, field, self.__to_uppers[field])

    def __setitem__(self, name, value):
        field = self.schema.fields[name]
        self._raw.set(field._raw, self.__to_lowers[field](value))

    def init(self, name, size=None):
        """Initialize a list (size given) or struct field and return it."""
        field = self.schema.fields[name]
        # For now let's only accept list and struct, but remember that
        # ``capnp::DynamicStruct::Builder::init`` actually supports more
        # types.
        if field.type.is_list():
            ASSERT.greater_or_equal(size, 0)
            return DynamicListBuilder(
                self._message,
                field.type.as_list(),
                self._raw.init(field._raw, size).asDynamicList(),
            )
        elif field.type.is_struct():
            return DynamicStructBuilder(
                self._message,
                field.type.as_struct(),
                self._raw.init(field._raw).asDynamicStruct(),
            )
        else:
            return ASSERT.unreachable('unexpected item type: {}', field.type)

    def __delitem__(self, name):
        # Clear the field back to its default value.
        field = self.schema.fields[name]
        self._raw.clear(field._raw)
def _struct_getitem(struct, field, to_upper):
    """Read one field of a struct wrapper, converting it via to_upper.

    Returns ``None`` for unset fields that have no meaningful default
    (named unions and non-pointer slots without an explicit default).
    """
    # By the way, ``NON_NULL`` and ``NON_DEFAULT`` behave the same for
    # pointer types.
    if not struct._raw.has(field._raw, _capnp.HasMode.NON_NULL):
        # Return ``None`` on named union fields.
        if field.proto.is_group():
            return None
        # Return ``None`` on non-pointer fields without a default value.
        if field.proto.is_slot() and not field.proto.slot.had_explicit_default:
            return None
    return to_upper(struct._message, struct._raw.get(field._raw))
# Maps a capnp primitive type tag (``Type.Which``) to a triple used by
# the converter factories below: the expected Python type, an accessor
# pulling the value out of a DynamicValue, and a constructor wrapping a
# Python value back into a DynamicValue.Reader.
_PRIMITIVE_TYPES = {
    which: (
        # type, to_upper, to_lower.
        type_,
        operator.methodcaller('as%s' % name),
        getattr(_capnp.DynamicValue.Reader, 'from%s' % name),
    )
    for which, name, type_ in (
        (_capnp.schema.Type.Which.VOID, 'Void', _capnp.VoidType),
        (_capnp.schema.Type.Which.BOOL, 'Bool', bool),
        (_capnp.schema.Type.Which.INT8, 'Int', int),
        (_capnp.schema.Type.Which.INT16, 'Int', int),
        (_capnp.schema.Type.Which.INT32, 'Int', int),
        (_capnp.schema.Type.Which.INT64, 'Int', int),
        (_capnp.schema.Type.Which.UINT8, 'Uint', int),
        (_capnp.schema.Type.Which.UINT16, 'Uint', int),
        (_capnp.schema.Type.Which.UINT32, 'Uint', int),
        (_capnp.schema.Type.Which.UINT64, 'Uint', int),
        (_capnp.schema.Type.Which.FLOAT32, 'Float', float),
        (_capnp.schema.Type.Which.FLOAT64, 'Float', float),
    )
}
def _make_to_upper(item_type, is_reader):
    """Return a converter from raw capnp value to Python-facing value.

    The returned callable takes ``(message, value)``; ``message`` is
    the root message keeping the underlying memory alive and is passed
    through to list/struct wrappers.
    """
    # Handle non-pointer types first.
    result = _PRIMITIVE_TYPES.get(item_type.which)
    if result:
        return functools.partial(_primitive_to_upper, result[1])
    if item_type.is_enum():
        return _enum_to_upper
    # Handle pointer types.
    if item_type.is_text():
        return _text_to_upper
    if item_type.is_data():
        return _data_to_upper
    if item_type.is_list():
        return functools.partial(
            _list_to_upper,
            # TODO: Sadly, this will break users who subclass
            # DynamicListReader or DynamicListBuilder (same below) as we
            # hard code types here.
            DynamicListReader if is_reader else DynamicListBuilder,
            item_type.as_list(),
        )
    if item_type.is_struct():
        return functools.partial(
            _struct_to_upper,
            DynamicStructReader if is_reader else DynamicStructBuilder,
            item_type.as_struct(),
        )
    if item_type.is_interface():
        raise NotImplementedError('do not support interface for now')
    if item_type.is_any_pointer():
        raise NotImplementedError('do not support any-pointer for now')
    return ASSERT.unreachable('unexpected item type: {}', item_type)
def _primitive_to_upper(to_upper, message, value):
del message # Unused.
return to_upper(value)
def _enum_to_upper(message, value):
del message # Unused.
# Simply return the enum value and do not convert it to Python enum
# type; implement the conversion at higher level.
return value.asDynamicEnum().getRaw()
def _text_to_upper(message, value):
del message # Unused.
# Should I return a memory view instead?
return str(value.asText(), 'utf-8')
def _data_to_upper(message, value):
del message # Unused.
return value.asData()
def _list_to_upper(list_type, schema, message, value):
return list_type(message, schema, value.asDynamicList())
def _struct_to_upper(struct_type, schema, message, value):
return struct_type(message, schema, value.asDynamicStruct())
def _make_to_lower(item_type):
    """Return a converter from Python value to capnp DynamicValue.

    Mirror image of ``_make_to_upper``; used on the set/assignment
    path of the builder classes.
    """
    # Handle non-pointer types first.
    result = _PRIMITIVE_TYPES.get(item_type.which)
    if result:
        return functools.partial(_primitive_to_lower, result[0], result[2])
    if item_type.is_enum():
        return functools.partial(_enum_to_lower, item_type.as_enum())
    # Handle pointer types.
    if item_type.is_text():
        return _text_to_lower
    if item_type.is_data():
        return _data_to_lower
    if item_type.is_list():
        return _list_to_lower
    if item_type.is_struct():
        return _struct_to_lower
    if item_type.is_interface():
        raise NotImplementedError('do not support interface for now')
    if item_type.is_any_pointer():
        raise NotImplementedError('do not support any-pointer for now')
    return ASSERT.unreachable('unexpected item type: {}', item_type)
def _primitive_to_lower(type_, to_lower, value):
    """Check ``value`` against the expected Python type, then wrap it."""
    ASSERT.isinstance(value, type_)
    return to_lower(value)
def _enum_to_lower(schema, value):
    """Wrap a Python enum member or int as a capnp DynamicEnum value."""
    # Accept either a Python enum member or its raw integer value.
    if isinstance(value, enum.Enum):
        value = value.value
    ASSERT.isinstance(value, int)
    return _capnp.DynamicValue.Reader.fromDynamicEnum(
        _capnp.DynamicEnum(schema._raw, value)
    )
def _text_to_lower(value):
    """Wrap a Python str as a capnp Text value."""
    ASSERT.isinstance(value, str)
    return _capnp.DynamicValue.Reader.fromText(value)
def _data_to_lower(value):
    """Wrap bytes or a memoryview as a capnp Data value."""
    ASSERT.isinstance(value, (bytes, memoryview))
    return _capnp.DynamicValue.Reader.fromData(value)
def _list_to_lower(value):
    """Unwrap a list wrapper into a capnp DynamicValue reader."""
    if isinstance(value, DynamicListReader):
        raw_reader = value._raw
    else:
        # Builders are converted to a read-only view first.
        ASSERT.isinstance(value, DynamicListBuilder)
        raw_reader = value._raw.asReader()
    return _capnp.DynamicValue.Reader.fromDynamicList(raw_reader)
def _struct_to_lower(value):
    """Unwrap a struct wrapper into a capnp DynamicValue reader."""
    if isinstance(value, DynamicStructReader):
        raw_reader = value._raw
    else:
        # Builders are converted to a read-only view first.
        ASSERT.isinstance(value, DynamicStructBuilder)
        raw_reader = value._raw.asReader()
    return _capnp.DynamicValue.Reader.fromDynamicStruct(raw_reader)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/third-party/capnp/capnp/dynamics.py",
"copies": "1",
"size": "12144",
"license": "mit",
"hash": 8128664539617976000,
"line_mean": 29.4360902256,
"line_max": 79,
"alpha_frac": 0.6164361001,
"autogenerated": false,
"ratio": 3.721728470732455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838164570832455,
"avg_score": 0,
"num_lines": 399
} |
__all__ = [
'DynamicObject',
'DynamicListAdapter',
'register_converter',
'register_enum',
'register_serializer',
]
import collections
import collections.abc
import enum
import warnings
from . import bases
from .schemas import Type
from .dynamics import DynamicEnum
from .dynamics import DynamicList
from .dynamics import DynamicStruct
# Maps capnp-domain type -> converter callable (looked up by exact
# type() match in _convert).
_CONVERTER_TABLE = {}
# Maps Python type -> serializer callable; ordered because _serialize
# matches by isinstance and the first matching entry wins.
_SERIALIZER_TABLE = collections.OrderedDict()
def register_converter(type_, converter):
    """Register a converter for the given type.

    A converter transforms capnp-domain value into Python-domain.
    """
    # Registration is one-shot; re-registering a type is an error.
    if type_ not in _CONVERTER_TABLE:
        _CONVERTER_TABLE[type_] = converter
    else:
        raise ValueError('cannot override converter: type=%r' % type_)
def register_serializer(type_, serializer):
    """Register a serializer for the given type and all its sub-types.

    A serializer transforms Python value into another that is suitable
    for JSON or YAML serialization.

    Note that serializer is matched with all sub-types because it is
    common that you sub-class a Python type (which is not so for capnp-
    domain types).
    """
    # Registration is one-shot; re-registering a type is an error.
    if type_ not in _SERIALIZER_TABLE:
        _SERIALIZER_TABLE[type_] = serializer
    else:
        raise ValueError('cannot override serializer: type=%r' % type_)
def _identity_func(value):
    # Default no-op converter/serializer.
    return value
def _convert(value):
    """Convert a capnp-domain value to Python-domain, if registered."""
    converter = _CONVERTER_TABLE.get(type(value), _identity_func)
    return converter(value)
def _serialize(value):
    """Apply the first registered serializer matching value's type."""
    for registered_type, serializer in _SERIALIZER_TABLE.items():
        if isinstance(value, registered_type):
            return serializer(value)
    # No serializer matched; pass the value through unchanged.
    return value
def _set_root(node, leaf):
    """Add a reference from leaf to root.

    This should prevent root node from being garbage collected while
    leaf is still alive (downside is that it may retain more memory).
    """
    # Only wrapper objects carry a _root slot; other values (ints,
    # strings, ...) pass through untouched.
    if isinstance(leaf, (DynamicObject, DynamicListAdapter)):
        assert leaf._root is None
        # Point at the true root, not an intermediate node.
        leaf._root = node if node._root is None else node._root
    return leaf
# Maps capnp enum schema -> generated Python Enum class.
_ENUM_TYPES = {}
def register_enum(schema):
    """Generate, register, and return a Python enum class for schema."""
    # Registration is one-shot per schema.
    if schema in _ENUM_TYPES:
        raise ValueError('cannot override: %r' % schema)
    generated = schema.generate_enum()
    _ENUM_TYPES[schema] = generated
    return generated
def convert_enum(dynamic_enum):
    """Convert a DynamicEnum to its registered Python enum member.

    Falls back to the raw value (with a warning) when the enum schema
    was never registered.
    """
    cls = _ENUM_TYPES.get(dynamic_enum.schema)
    if cls is None:
        warnings.warn('dynamic enum is not registered: %r' % dynamic_enum)
        return dynamic_enum.get()
    return cls(dynamic_enum.get())
# Hook enum conversion/serialization into the global tables.
register_converter(DynamicEnum, convert_enum)
register_serializer(enum.Enum, lambda e: e.value)
class DynamicObjectMeta(type):
    """Metaclass that binds DynamicObject sub-classes to struct schemas."""

    # Maps struct schema -> the DynamicObject sub-class bound to it.
    DYNAMIC_OBJECT_CLASS = {}

    @classmethod
    def convert_struct(mcs, struct):
        # Fall back to the generic DynamicObject for unbound schemas.
        cls = mcs.DYNAMIC_OBJECT_CLASS.get(struct.schema, DynamicObject)
        return cls(struct)

    def __new__(mcs, class_name, base_classes, namespace, schema=None):
        # Each schema may be bound to at most one class.
        if schema in mcs.DYNAMIC_OBJECT_CLASS:
            raise ValueError('cannot override: %r' % schema)
        if schema is not None:
            namespace['_schema'] = schema
        cls = super().__new__(mcs, class_name, base_classes, namespace)
        if schema is not None:
            mcs.DYNAMIC_OBJECT_CLASS[schema] = cls
        return cls

    def __init__(cls, name, base_classes, namespace, **_):
        # Swallow the extra ``schema`` keyword that type.__init__
        # would reject.
        super().__init__(name, base_classes, namespace)
class DynamicObject(metaclass=DynamicObjectMeta):
    """Let you access DynamicStruct like a regular object.

    NOTE: Cap'n Proto's data model is quite different from the normal
    Python object semantics - at least for now I can't reconcile the
    differences of the two sides; as a result, this class is quite
    awkward to use at the moment.
    """

    # Per-field post-converters keyed by snake_case field name; applied
    # after the global converter table in __getattr__/_init.
    __annotations__ = {}

    # Default schema; set by the metaclass when a sub-class is declared
    # with a ``schema=...`` keyword.
    _schema = None

    @classmethod
    def _make(cls, message, schema=None):
        """Make a DynamicObject from message and default schema.

        This will "own" the message object, and thus you should neither
        open the message before calling this, nor close the message
        afterwards.
        """
        if schema is None:
            schema = cls._schema
        assert schema is not None
        message.open()
        try:
            obj = cls(message.get_root(schema))
            obj._message = message
            return obj
        # NOTE: bare except is deliberate here - any failure must close
        # the message we just opened, then re-raise.
        except:
            message.close()
            raise

    def __init__(self, struct):
        assert isinstance(struct, (DynamicStruct, DynamicStruct.Builder))
        # _message is non-None only when this object owns the message
        # (created through _make); _close is a no-op otherwise.
        self._message = None
        self._struct = struct
        self._root = None

    def __del__(self):
        # Release C++ resources, just to be safe.
        self._close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self._close()

    def _close(self):
        if self._message is not None:
            # Null out both slots atomically-ish before closing.
            self._struct, self._message, message = None, None, self._message
            message.close()

    @property
    def _closed(self):
        # Leaf objects delegate to the root that owns the memory.
        if self._root:
            return self._root._closed
        else:
            return self._struct is None

    def _as_reader(self):
        """Return a read-only view of this object."""
        assert not self._closed
        return _set_root(self, self.__class__(self._struct.as_reader()))

    def _items(self):
        """Yield (snake_case_name, converted_value) pairs."""
        assert not self._closed
        for camel_case in self._struct.keys():
            name = bases.camel_to_lower_snake(camel_case)
            # Use getattr() so that converter may participate.
            value = getattr(self, name)
            yield name, value

    def _serialize_asdict(self):
        """Serialize to an OrderedDict of JSON/YAML-friendly values."""
        assert not self._closed
        return collections.OrderedDict(
            (name, _serialize(value))
            for name, value in self._items()
        )

    def _init(self, name, size=None):
        """Initialize a list (size given) or struct field by name."""
        assert not self._closed
        camel_case = bases.snake_to_lower_camel(name)
        value = _convert(self._struct.init(camel_case, size))
        value = self.__annotations__.get(name, _identity_func)(value)
        return _set_root(self, value)

    def __getattr__(self, name):
        assert not self._closed
        # Translate name.
        camel_case = bases.snake_to_lower_camel(name)
        try:
            field = self._struct.schema[camel_case]
        except KeyError:
            msg = '%s has no field %r' % (self._struct.schema, camel_case)
            raise AttributeError(msg) from None
        # Retrieve the attribute.
        try:
            value = self._struct[camel_case]
        except KeyError:
            # Return default value for this attribute.
            if field.type.kind is Type.Kind.LIST:
                return ()
            else:
                return None
        # Apply registered converter.
        value = _convert(value)
        # Apply per-struct converter.
        value = self.__annotations__.get(name, _identity_func)(value)
        return _set_root(self, value)

    def __setattr__(self, name, value):
        # Special case for attribute name started with '_'.
        if name.startswith('_'):
            super().__setattr__(name, value)
            return
        assert not self._closed
        camel_case = bases.snake_to_lower_camel(name)
        try:
            field = self._struct.schema[camel_case]
        except KeyError:
            msg = '%s has no field %r' % (self._struct.schema, camel_case)
            raise AttributeError(msg) from None
        # Delegate the type-dependent assignment logic.
        _setter_helper(
            field.type,
            self._struct,
            camel_case,
            value,
            lambda: getattr(self, name),
        )

    def __delattr__(self, name):
        # Special case for attribute name started with '_'.
        if name.startswith('_'):
            super().__delattr__(name)
            return
        assert not self._closed
        camel_case = bases.snake_to_lower_camel(name)
        try:
            self._struct.pop(camel_case)
        except KeyError:
            msg = '%s cannot delete %r' % (self._struct.schema, camel_case)
            raise AttributeError(msg) from None

    def __str__(self):
        return str(self._struct)

    __repr__ = bases.repr_object

    def __eq__(self, other):
        # Equality requires the exact same wrapper class.
        if not isinstance(other, self.__class__):
            return False
        return self._struct == other._struct

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._struct)
# Struct values are wrapped through the metaclass registry; a
# DynamicObject serializes to an OrderedDict of its fields.
register_converter(DynamicStruct, DynamicObjectMeta.convert_struct)
register_converter(DynamicStruct.Builder, DynamicObjectMeta.convert_struct)
register_serializer(DynamicObject, DynamicObject._serialize_asdict)
class DynamicListAdapter(collections.abc.MutableSequence):
    """Adapt DynamicList(.Builder) to Python's MutableSequence protocol.

    Insertion and deletion are not supported; see __delitem__/insert.
    """

    def __init__(self, list_):
        assert isinstance(list_, (DynamicList, DynamicList.Builder))
        self._list = list_
        self._root = None
        # Optional per-element post-converter, set by the owner.
        self._convert_item = None

    @property
    def _closed(self):
        # A list is never a root by itself.
        assert self._root
        return self._root._closed

    def __convert(self, value):
        # Global converter table first, then the per-element converter.
        return (self._convert_item or _identity_func)(_convert(value))

    def _serialize_aslist(self):
        """Serialize to a plain list of JSON/YAML-friendly values."""
        assert not self._closed
        return list(map(_serialize, self))

    def __len__(self):
        assert not self._closed
        return len(self._list)

    def __iter__(self):
        assert not self._closed
        for obj in map(self.__convert, self._list):
            yield _set_root(self, obj)

    def _init(self, index, size=None):
        """Initialize a nested list (size given) or struct element."""
        assert not self._closed
        obj = self.__convert(self._list.init(index, size))
        return _set_root(self, obj)

    def __getitem__(self, index):
        assert not self._closed
        obj = self.__convert(self._list[index])
        return _set_root(self, obj)

    def __setitem__(self, index, value):
        assert not self._closed
        _setter_helper(
            self._list.schema.element_type,
            self._list,
            index,
            value,
            lambda: self[index],
        )

    def __delitem__(self, index):
        raise IndexError('do not support __delitem__')

    def insert(self, index, value):
        raise IndexError('do not support insert')

    def __str__(self):
        return str(self._list)

    __repr__ = bases.repr_object

    def __eq__(self, other):
        # Equality requires the exact same wrapper class.
        if not isinstance(other, self.__class__):
            return False
        return self._list == other._list

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._list)
# Lists are wrapped by the adapter and serialized as plain lists.
register_converter(DynamicList, DynamicListAdapter)
register_converter(DynamicList.Builder, DynamicListAdapter)
register_serializer(DynamicListAdapter, DynamicListAdapter._serialize_aslist)
def _setter_helper(type_, target, key, value, get_obj):
    """Assign ``value`` to ``target[key]`` according to field type.

    ``get_obj`` lazily re-fetches the converted, freshly-initialized
    member so lists and structs can be filled element by element.
    Scalars are cleared when assigned None; lists when assigned an
    empty value; structs when assigned a falsy non-mapping value.
    """
    if type_.kind is Type.Kind.VOID:
        target[key] = value
    elif type_.kind.is_scalar:
        if value is None:
            # None clears a scalar field (if present).
            if key in target:
                del target[key]
        else:
            target[key] = value
    elif type_.kind is Type.Kind.LIST:
        if value:
            # Allocate the list, then copy element by element.
            target.init(key, len(value))
            obj = get_obj()
            for index, element in enumerate(value):
                obj[index] = element
        else:
            if key in target:
                del target[key]
    elif type_.kind is Type.Kind.STRUCT:
        if (isinstance(value, DynamicObject) and
                type_.schema is value._struct.schema):
            # Same schema: bulk-copy the underlying struct.
            target.init(key)
            obj = get_obj()
            obj._struct.copy_from(value._struct)
        elif isinstance(value, collections.abc.Mapping):
            # Mapping: assign field by field through __setattr__.
            target.init(key)
            obj = get_obj()
            for k, v in value.items():
                setattr(obj, k, v)
        elif not value:
            if key in target:
                del target[key]
        else:
            raise ValueError(
                'cannot assign from: %s %s %r' % (type_, key, value))
    else:
        raise AssertionError('cannot assign to: %s %s' % (type_, key))
| {
"repo_name": "clchiou/garage",
"path": "py/capnp/capnp/objects.py",
"copies": "1",
"size": "12005",
"license": "mit",
"hash": -3669758189165029000,
"line_mean": 27.0490654206,
"line_max": 77,
"alpha_frac": 0.594002499,
"autogenerated": false,
"ratio": 4.181469871125044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014602803738317756,
"num_lines": 428
} |
__all__=(
'Ean13BarcodeWidget','isEanString',
'Ean8BarcodeWidget', 'UPCA', 'Ean5BarcodeWidget', 'ISBNBarcodeWidget',
)
from reportlab.graphics.shapes import Group, String, Rect
from reportlab.lib import colors
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.validators import isNumber, isColor, isString, Validator, isBoolean, NoneOr
from reportlab.lib.attrmap import *
from reportlab.graphics.charts.areas import PlotArea
from reportlab.lib.units import mm
from reportlab.lib.utils import asNative
#work out a list of manufacturer codes....
_eanNumberSystems = [
('00-13', 'USA & Canada'),
('20-29', 'In-Store Functions'),
('30-37', 'France'),
('40-44', 'Germany'),
('45', 'Japan (also 49)'),
('46', 'Russian Federation'),
('471', 'Taiwan'),
('474', 'Estonia'),
('475', 'Latvia'),
('477', 'Lithuania'),
('479', 'Sri Lanka'),
('480', 'Philippines'),
('482', 'Ukraine'),
('484', 'Moldova'),
('485', 'Armenia'),
('486', 'Georgia'),
('487', 'Kazakhstan'),
('489', 'Hong Kong'),
('49', 'Japan (JAN-13)'),
('50', 'United Kingdom'),
('520', 'Greece'),
('528', 'Lebanon'),
('529', 'Cyprus'),
('531', 'Macedonia'),
('535', 'Malta'),
('539', 'Ireland'),
('54', 'Belgium & Luxembourg'),
('560', 'Portugal'),
('569', 'Iceland'),
('57', 'Denmark'),
('590', 'Poland'),
('594', 'Romania'),
('599', 'Hungary'),
('600-601', 'South Africa'),
('609', 'Mauritius'),
('611', 'Morocco'),
('613', 'Algeria'),
('619', 'Tunisia'),
('622', 'Egypt'),
('625', 'Jordan'),
('626', 'Iran'),
('64', 'Finland'),
('690-692', 'China'),
('70', 'Norway'),
('729', 'Israel'),
('73', 'Sweden'),
('740', 'Guatemala'),
('741', 'El Salvador'),
('742', 'Honduras'),
('743', 'Nicaragua'),
('744', 'Costa Rica'),
('746', 'Dominican Republic'),
('750', 'Mexico'),
('759', 'Venezuela'),
('76', 'Switzerland'),
('770', 'Colombia'),
('773', 'Uruguay'),
('775', 'Peru'),
('777', 'Bolivia'),
('779', 'Argentina'),
('780', 'Chile'),
('784', 'Paraguay'),
('785', 'Peru'),
('786', 'Ecuador'),
('789', 'Brazil'),
('80-83', 'Italy'),
('84', 'Spain'),
('850', 'Cuba'),
('858', 'Slovakia'),
('859', 'Czech Republic'),
('860', 'Yugloslavia'),
('869', 'Turkey'),
('87', 'Netherlands'),
('880', 'South Korea'),
('885', 'Thailand'),
('888', 'Singapore'),
('890', 'India'),
('893', 'Vietnam'),
('899', 'Indonesia'),
('90-91', 'Austria'),
('93', 'Australia'),
('94', 'New Zealand'),
('955', 'Malaysia'),
('977', 'International Standard Serial Number for Periodicals (ISSN)'),
('978', 'International Standard Book Numbering (ISBN)'),
('979', 'International Standard Music Number (ISMN)'),
('980', 'Refund receipts'),
('981-982', 'Common Currency Coupons'),
('99', 'Coupons')
]
# Expand the (possibly ranged) prefix table into a flat int -> name
# map; e.g. '40-44' adds keys 40..44 all mapping to 'Germany'.
manufacturerCodes = {}
for (k, v) in _eanNumberSystems:
    words = k.split('-')
    if len(words)==2:
        fromCode = int(words[0])
        toCode = int(words[1])
        for code in range(fromCode, toCode+1):
            manufacturerCodes[code] = v
    else:
        manufacturerCodes[int(k)] = v
def nDigits(n):
    """Return a Validator accepting strings of exactly n ASCII digits."""
    class _ndigits(Validator):
        def test(self, x):
            # Exactly n characters, all of which are ASCII digits.
            return (type(x) is str and len(x) <= n
                    and sum(1 for c in x if c in "0123456789") == n)
    return _ndigits()
class Ean13BarcodeWidget(PlotArea):
    """EAN-13 barcode widget: 12 data digits plus a computed check digit."""
    codeName = "EAN13"
    _attrMap = AttrMap(BASE=PlotArea,
        value = AttrMapValue(nDigits(12), desc='the number'),
        fontName = AttrMapValue(isString, desc='fontName'),
        fontSize = AttrMapValue(isNumber, desc='font size'),
        x = AttrMapValue(isNumber, desc='x-coord'),
        y = AttrMapValue(isNumber, desc='y-coord'),
        barFillColor = AttrMapValue(isColor, desc='bar color'),
        barHeight = AttrMapValue(isNumber, desc='Height of bars.'),
        barWidth = AttrMapValue(isNumber, desc='Width of bars.'),
        barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
        barStrokeColor = AttrMapValue(isColor, desc='Color of bar borders.'),
        textColor = AttrMapValue(isColor, desc='human readable text color'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        quiet = AttrMapValue(isBoolean, desc='if quiet zone to be used'),
        lquiet = AttrMapValue(isBoolean, desc='left quiet zone length'),
        rquiet = AttrMapValue(isBoolean, desc='right quiet zone length'),
        )
    _digits=12
    _start_right = 7    #for ean-13 left = [0:7] right=[7:13]
    _nbars = 113
    barHeight = 25.93*mm    #millimeters
    barWidth = (37.29/_nbars)*mm
    humanReadable = 1
    # Checksum weights for even/odd digit positions.
    _0csw = 1
    _1csw = 3
    #Left Hand Digits.
    _left = (   ("0001101", "0011001", "0010011", "0111101",
                "0100011", "0110001", "0101111", "0111011",
                "0110111", "0001011",
                ),  #odd left hand digits
                ("0100111", "0110011", "0011011", "0100001",
                "0011101", "0111001", "0000101", "0010001",
                "0001001", "0010111"),  #even left hand digits
            )
    _right = ("1110010", "1100110", "1101100", "1000010",
            "1011100", "1001110", "1010000", "1000100",
            "1001000", "1110100")
    quiet = 1
    rquiet = lquiet = None
    # Guard patterns: _tail at both ends, _sep in the middle.
    _tail = "101"
    _sep = "01010"
    # First digit -> parity pattern (0=odd, 1=even) of the left half.
    _lhconvert={
            "0": (0,0,0,0,0,0),
            "1": (0,0,1,0,1,1),
            "2": (0,0,1,1,0,1),
            "3": (0,0,1,1,1,0),
            "4": (0,1,0,0,1,1),
            "5": (0,1,1,0,0,1),
            "6": (0,1,1,1,0,0),
            "7": (0,1,0,1,0,1),
            "8": (0,1,0,1,1,0),
            "9": (0,1,1,0,1,0)
            }
    # NOTE(review): font sizes are conventionally points, despite the
    # inherited 'millimeters' note below -- confirm before relying on it.
    fontSize = 8        #millimeters
    fontName = 'Helvetica'
    textColor = barFillColor = colors.black
    barStrokeColor = None
    barStrokeWidth = 0
    x = 0
    y = 0
    def __init__(self,value='123456789012',**kw):
        # Zero-pad short values on the left; truncate long ones.
        value = str(value) if isinstance(value,int) else asNative(value)
        self.value=max(self._digits-len(value),0)*'0'+value[:self._digits]
        for k, v in kw.items():
            setattr(self, k, v)
    width = property(lambda self: self.barWidth*(self._nbars-18+self._calc_quiet(self.lquiet)+self._calc_quiet(self.rquiet)))
    def wrap(self,aW,aH):
        return self.width,self.barHeight
    def _encode_left(self,s,a):
        # Append the 6 left-hand digit patterns, parity chosen by the
        # first digit's _lhconvert entry.
        cp = self._lhconvert[s[0]]      #convert the left hand numbers
        _left = self._left
        z = ord('0')
        for i,c in enumerate(s[1:self._start_right]):
            a(_left[cp[i]][ord(c)-z])
    def _short_bar(self,i):
        # True for bar positions that are shortened to make room for
        # the human-readable digits.
        i += 9 - self._lquiet
        return self.humanReadable and ((12<i<55) or (57<i<101))
    def _calc_quiet(self,v):
        # Convert a quiet-zone length (points, or None for the default
        # of 9 modules) into a whole number of bar modules.
        if self.quiet:
            if v is None:
                v = 9
            else:
                x = float(max(v,0))/self.barWidth
                v = int(x)
                if v-x>0: v += 1
        else:
            v = 0
        return v
    def draw(self):
        g = Group()
        gAdd = g.add
        barWidth = self.barWidth
        width = self.width
        barHeight = self.barHeight
        x = self.x
        y = self.y
        gAdd(Rect(x,y,width,barHeight,fillColor=None,strokeColor=None,strokeWidth=0))
        # Full symbol = value + computed check digit.
        s = self.value+self._checkdigit(self.value)
        self._lquiet = lquiet = self._calc_quiet(self.lquiet)
        rquiet = self._calc_quiet(self.rquiet)
        b = [lquiet*'0',self._tail] #the signal string
        a = b.append
        self._encode_left(s,a)
        a(self._sep)
        z = ord('0')
        _right = self._right
        for c in s[self._start_right:]:
            a(_right[ord(c)-z])
        a(self._tail)
        a(rquiet*'0')
        fontSize = self.fontSize
        barFillColor = self.barFillColor
        barStrokeWidth = self.barStrokeWidth
        barStrokeColor = self.barStrokeColor
        fth = fontSize*1.2
        b = ''.join(b)
        lrect = None
        # Walk the bit string; runs of '1' are merged into single Rects.
        for i,c in enumerate(b):
            if c=="1":
                dh = self._short_bar(i) and fth or 0
                yh = y+dh
                if lrect and lrect.y==yh:
                    lrect.width += barWidth
                else:
                    lrect = Rect(x,yh,barWidth,barHeight-dh,fillColor=barFillColor,strokeWidth=barStrokeWidth,strokeColor=barStrokeColor)
                    gAdd(lrect)
            else:
                lrect = None
            x += barWidth
        if self.humanReadable: self._add_human_readable(s,gAdd)
        return g
    def _add_human_readable(self,s,gAdd):
        # Draw the leading digit left of the symbol and the two 6-digit
        # groups centered under the bar halves.
        barWidth = self.barWidth
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize*1.2
        # draw the num below the line.
        c = s[0]
        w = stringWidth(c,fontName,fontSize)
        x = self.x+barWidth*(self._lquiet-8)
        y = self.y + 0.2*fth
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
        x = self.x + (33-9+self._lquiet)*barWidth
        c = s[1:7]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
        x += 47*barWidth
        c = s[7:]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
    def _checkdigit(cls,num):
        # Weighted mod-10 checksum over the digit string.
        z = ord('0')
        iSum = cls._0csw*sum([(ord(x)-z) for x in num[::2]]) \
                 + cls._1csw*sum([(ord(x)-z) for x in num[1::2]])
        return chr(z+((10-(iSum%10))%10))
    _checkdigit=classmethod(_checkdigit)
class Ean8BarcodeWidget(Ean13BarcodeWidget):
    """EAN-8 barcode widget: 7 data digits plus a computed check digit."""
    codeName = "EAN8"
    _attrMap = AttrMap(BASE=Ean13BarcodeWidget,
        value = AttrMapValue(nDigits(7), desc='the number'),
        )
    _start_right = 4    #for ean-13 left = [0:7] right=[7:13]
    _nbars = 85
    _digits=7
    # Checksum weights are swapped relative to EAN-13.
    _0csw = 3
    _1csw = 1
    def _encode_left(self,s,a):
        # NOTE(review): ``cp`` is computed but never used below; the
        # left half is always encoded with the odd-parity table.
        cp = self._lhconvert[s[0]]      #convert the left hand numbers
        _left = self._left[0]
        z = ord('0')
        for i,c in enumerate(s[0:self._start_right]):
            a(_left[ord(c)-z])
    def _short_bar(self,i):
        # Shortened bar positions for EAN-8's narrower symbol.
        i += 9 - self._lquiet
        return self.humanReadable and ((12<i<41) or (43<i<73))
    def _add_human_readable(self,s,gAdd):
        # Draw the two 4-digit groups centered under the bar halves.
        barWidth = self.barWidth
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize*1.2
        # draw the num below the line.
        y = self.y + 0.2*fth
        # NOTE(review): unlike the EAN-13 version, these x positions do
        # not add self.x -- looks wrong for widgets drawn at x != 0;
        # confirm against callers before changing.
        x = (26.5-9+self._lquiet)*barWidth
        c = s[0:4]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
        x = (59.5-9+self._lquiet)*barWidth
        c = s[4:]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
class UPCA(Ean13BarcodeWidget):
    """UPC-A barcode widget: 11 digit value plus a derived check digit."""
    codeName = "UPCA"
    _attrMap = AttrMap(BASE=Ean13BarcodeWidget,
        value = AttrMapValue(nDigits(11), desc='the number'),
        )
    _start_right = 6
    _digits = 11
    _0csw = 3
    _1csw = 1
    _nbars = 1+7*11+2*3+5

    #these methods contributed by Kyle Macfarlane
    #https://bitbucket.org/kylemacfarlane/
    def _encode_left(self,s,a):
        """Append the left-half bar patterns for s[0:6] to the signal.

        UPC-A encodes the whole left half with the first left-hand table,
        so the EAN-13 parity lookup is unnecessary (the original fetched
        self._lhconvert[s[0]] and never used it).
        """
        _left = self._left[0]
        z = ord('0')
        for c in s[0:self._start_right]:
            a(_left[ord(c)-z])

    def _short_bar(self,i):
        # Shorten the bars over the printed digits; the excluded ranges are
        # presumably the guard/centre bars — confirm against the UPC-A
        # layout if this is ever changed.
        i += 9 - self._lquiet
        return self.humanReadable and ((18<i<55) or (57<i<93))

    def _add_human_readable(self,s,gAdd):
        """Draw the digits: leading digit at the left, two 5-digit groups
        centred under the halves, check digit at the right."""
        barWidth = self.barWidth
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize*1.2
        # draw the num below the line.
        c = s[0]
        # (the original also computed stringWidth(c, ...) here but never used it)
        x = self.x+barWidth*(self._lquiet-8)
        y = self.y + 0.2*fth
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
        x = self.x + (38-9+self._lquiet)*barWidth
        c = s[1:6]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
        x += 36*barWidth
        c = s[6:11]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
        x += 32*barWidth
        c = s[11]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
class Ean5BarcodeWidget(Ean13BarcodeWidget):
    """
    EAN-5 barcodes can print the human readable price, set:
    price=True
    """
    codeName = "EAN5"
    _attrMap = AttrMap(BASE=Ean13BarcodeWidget,
        price=AttrMapValue(isBoolean,
            desc='whether to display the price or not'),
        value=AttrMapValue(nDigits(5), desc='the number'),
        )
    # EAN-5 add-on geometry/encoding constants.
    _nbars = 48
    _digits = 5
    _sep = '01'      # 2-module separator drawn between digit patterns
    _tail = '01011'  # start guard of the add-on
    # Checksum weights (even positions x3, odd positions x9).
    _0csw = 3
    _1csw = 9
    # Parity pattern selected by the (unprinted) check digit: a 1 picks the
    # second left-hand encoding table for that digit position.
    _lhconvert = {
        "0": (1, 1, 0, 0, 0),
        "1": (1, 0, 1, 0, 0),
        "2": (1, 0, 0, 1, 0),
        "3": (1, 0, 0, 0, 1),
        "4": (0, 1, 1, 0, 0),
        "5": (0, 0, 1, 1, 0),
        "6": (0, 0, 0, 1, 1),
        "7": (0, 1, 0, 1, 0),
        "8": (0, 1, 0, 0, 1),
        "9": (0, 0, 1, 0, 1)
        }

    def _checkdigit(cls, num):
        # NOTE(review): unlike the base class, this override is NOT wrapped
        # in classmethod(); it still works because it is only invoked as
        # self._checkdigit(s) below, binding the instance to `cls`, and the
        # instance exposes the _0csw/_1csw class attributes being read.
        z = ord('0')
        iSum = cls._0csw * sum([(ord(x) - z) for x in num[::2]]) \
            + cls._1csw * sum([(ord(x) - z) for x in num[1::2]])
        # EAN-5 uses the weighted sum mod 10 directly (no 10-complement
        # as in the EAN-13 base class).
        return chr(z + iSum % 10)

    def _encode_left(self, s, a):
        """Append the bar patterns for all 5 digits (with separators)."""
        # The check digit is never printed; it only selects the parity
        # pattern cp that decides which left-hand table encodes each digit.
        check = self._checkdigit(s)
        cp = self._lhconvert[check]
        _left = self._left
        _sep = self._sep
        z = ord('0')
        full_code = []
        for i, c in enumerate(s):
            full_code.append(_left[cp[i]][ord(c) - z])
        # Digit patterns are joined by the '01' separator modules.
        a(_sep.join(full_code))

    def _short_bar(self, i):
        i += 9 - self._lquiet
        return self.humanReadable and ((12 < i < 41) or (43 < i < 73))

    def _add_human_readable(self, s, gAdd):
        """Draw the 5 digits centred under the symbol; optionally a price above."""
        barWidth = self.barWidth
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize * 1.2
        # draw the num below the line.
        y = self.y + 0.2 * fth
        # Centre under the whole symbol: bars plus both quiet zones.
        x = self.x + (self._nbars + self._lquiet * 2) * barWidth / 2
        gAdd(String(x, y, s, fontName=fontName, fontSize=fontSize,
                    fillColor=textColor, textAnchor='middle'))
        price = getattr(self,'price',None)
        if price:
            price = None
            # Leading digit selects the currency symbol; other leading
            # digits suppress the price line entirely.
            if s[0] in '3456':
                price = '$'
            elif s[0] in '01':
                # NOTE(review): '\xc2\xa3' is the UTF-8 byte pair for the
                # pound sign — correct as a Python 2 byte string, but under
                # Python 3 it renders as two characters ('Â£').  Confirm
                # the target runtime before changing.
                price = '\xc2\xa3'
            if price is None:
                return
            price += s[1:3] + '.' + s[3:5]
            # The price caption sits above the bars.
            y += self.barHeight
            gAdd(String(x, y, price, fontName=fontName, fontSize=fontSize,
                        fillColor=textColor, textAnchor='middle'))

    def draw(self):
        """Build and return the Group of shapes for the EAN-5 add-on.

        Mirrors Ean13BarcodeWidget.draw, except every bar is unconditionally
        shortened (dh = fth) rather than only the bars over the digits.
        """
        g = Group()
        gAdd = g.add
        barWidth = self.barWidth
        width = self.width
        barHeight = self.barHeight
        x = self.x
        y = self.y
        # Invisible background rect fixes the widget's reported bounds.
        gAdd(Rect(x, y, width, barHeight, fillColor=None, strokeColor=None,
                  strokeWidth=0))
        s = self.value
        self._lquiet = lquiet = self._calc_quiet(self.lquiet)
        rquiet = self._calc_quiet(self.rquiet)
        b = [lquiet * '0' + self._tail] # the signal string
        a = b.append
        self._encode_left(s, a)
        a(rquiet * '0')
        fontSize = self.fontSize
        barFillColor = self.barFillColor
        barStrokeWidth = self.barStrokeWidth
        barStrokeColor = self.barStrokeColor
        fth = fontSize * 1.2
        b = ''.join(b)
        lrect = None
        # Walk the 0/1 signal; runs of adjacent 1s widen the current rect
        # instead of emitting one rect per module.
        for i, c in enumerate(b):
            if c == "1":
                # Every bar is raised by fth, leaving room for the digit
                # line drawn underneath by _add_human_readable.
                dh = fth
                yh = y + dh
                if lrect and lrect.y == yh:
                    lrect.width += barWidth
                else:
                    lrect = Rect(x, yh, barWidth, barHeight - dh,
                                 fillColor=barFillColor,
                                 strokeWidth=barStrokeWidth,
                                 strokeColor=barStrokeColor)
                    gAdd(lrect)
            else:
                lrect = None
            x += barWidth
        if self.humanReadable:
            self._add_human_readable(s, gAdd)
        return g
class ISBNBarcodeWidget(Ean13BarcodeWidget):
    """
    ISBN Barcodes optionally print the EAN-5 supplemental price
    barcode (with the price in dollars or pounds). Set price to a string
    that follows the EAN-5 for ISBN spec:
        leading digit 0, 1 = GBP
                      3 = AUD
                      4 = NZD
                      5 = USD
                      6 = CAD
        next 4 digits = price between 00.00 and 99.98, i.e.:
        price='52499' # $24.99 USD
    """
    codeName = 'ISBN'
    _attrMap = AttrMap(BASE=Ean13BarcodeWidget,
        price=AttrMapValue(
            NoneOr(nDigits(5)),
            desc='None or the price to display'),
        )

    def draw(self):
        """Draw the EAN-13 symbol, then append the optional EAN-5 price add-on."""
        g = Ean13BarcodeWidget.draw(self)
        price = getattr(self,'price',None)
        if not price:
            return g
        # Place the add-on at the right edge of what was drawn so far.
        bounds = g.getBounds()
        x = bounds[2]
        pricecode = Ean5BarcodeWidget(x=x, value=price, price=True,
            humanReadable=True,
            barHeight=self.barHeight, quiet=self.quiet)
        g.add(pricecode)
        return g

    def _add_human_readable(self, s, gAdd):
        """Draw the base digits, plus a hyphenated 'ISBN ...' caption above the bars."""
        Ean13BarcodeWidget._add_human_readable(self,s, gAdd)
        barWidth = self.barWidth
        barHeight = self.barHeight
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize * 1.2
        # Caption sits one bar-height above the digit line.
        y = self.y + 0.2 * fth + barHeight
        # NOTE(review): unlike the base class, this x is not offset by
        # self.x — widgets drawn at a non-zero x may get a misplaced
        # caption; confirm.
        x = self._lquiet * barWidth
        isbn = 'ISBN '
        # Fixed 3-1-5-3-1 hyphen grouping of the 13 digits.
        segments = [s[0:3], s[3:4], s[4:9], s[9:12], s[12]]
        isbn += '-'.join(segments)
        gAdd(String(x, y, isbn, fontName=fontName, fontSize=fontSize,
                    fillColor=textColor))
| {
"repo_name": "sandeepkoduri/GAE-html-to-pdf",
"path": "libs/reportlab/graphics/barcode/eanbc.py",
"copies": "4",
"size": "18911",
"license": "mit",
"hash": 2372243573872961500,
"line_mean": 31.9459930314,
"line_max": 137,
"alpha_frac": 0.5107080535,
"autogenerated": false,
"ratio": 3.293451758969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5804159812469001,
"avg_score": null,
"num_lines": null
} |
# All needed as part of functions
from math import *
from decimal import *
from random import shuffle
from os import path
from inspect import signature, getdoc
from re import compile, split
import csv
import ScrapingFunctions as SF
from hmc_urllib import getHTML
from RankingSettings import * # Imports the dictionary of tags with names as well as global settings
from Glicko2 import *
"""
Copyright (c) 2015 Abraham Schulte
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Supported game titles; each one has its own per-player rating dict below.
Titles = ['Melee', 'PM', 'Sm4sh', 'SSB', 'Brawl']
# Sort keys used by the ranking functions:
# 'Bottom' = rating - 2*RD, 'Low' = rating - RD, 'Middle' = rating.
Sortings = ['Bottom', 'Low', 'Middle']
# Per-title player data:
# name -> [rating, RD, volatility, games played, [result file names]].
SSBPersonDict = {}
MeleePersonDict = {}
PMPersonDict = {}
BrawlPersonDict = {}
Sm4shPersonDict = {}
def TitleDict(Title):
    """Gives the dictionary associated with Title.

    Title: a string ('Melee', 'PM', 'Sm4sh', 'SSB', or 'Brawl').
    Raises KeyError for an unknown Title (the old if-chain raised a
    confusing UnboundLocalError instead).
    """
    return {
        'SSB': SSBPersonDict,
        'Melee': MeleePersonDict,
        'PM': PMPersonDict,
        'Brawl': BrawlPersonDict,
        'Sm4sh': Sm4shPersonDict,
    }[Title]
def AddPerson(Person, Dict, Rating = DefaultRating, RD = DefaultRD, Vol = DefaultVol):
    """Adds person to Dict with the given Glicko2 rankings.
    Person: a string.
    Dict: one of the Title dictionaries (recommended to use the TitleDict function).
    Rating: a float; the initial rating of Person.
    RD: a float; the initial rating deviation of Person.
    Vol: a float; the initial volitility of Person."""
    # Entry layout: [rating, RD, volatility, games played, [files]].
    # setdefault only inserts when the player is not yet known.
    Dict.setdefault(Person, [Rating, RD, Vol, 0, []])
    TagDict.setdefault(Person, '')
def AppendFile(Person, Dict, File):
    """Adds File to the list of files associated with Person in Dict.

    Duplicate file names are ignored.
    Person: a string.
    Dict: one of the Title dictionaries (recommended to use the TitleDict function).
    File: a string; the file to be associated with Person in Dict."""
    files = Dict[Person][4]
    if File not in files:
        # append mutates the stored list in place (the old `+= [File]`
        # built a throwaway one-element list first).
        files.append(File)
def Replacements(s):
    """Fixes a string to change all worded numbers to their number equivalents (one --> 1, etc.),
    as well as adding spaces to last initials (JohnD. --> John D.) and fixing all the typoes and changed tags
    from ReplacementList, which is located in RankingSettings.
    s: a string."""
    # Whole-string swap: if s exactly equals NumFix[1] it is rewritten to
    # NumFix[0] (the pairs come from RankingSettings).
    for NumFix in NumFixes:
        if s == NumFix[1]:
            s = s.replace(NumFix[1], NumFix[0])
    # If s ends in '<Upper>.' but not ' <Upper>.', insert a space before
    # the trailing initial (e.g. 'JohnD.' -> 'John D.').  Working on the
    # reversed string with count=1 touches only the *last* occurrence.
    for Upper in uppercases:
        if s[::-1][0:2] == '.' + Upper:
            if s[::-1][0:3] != '.' + Upper + ' ':
                s = s[::-1].replace('.' + Upper, '.' + Upper + ' ', 1)[::-1]
    # Exact-match typo / renamed-tag fixes from RankingSettings.
    for Replacement in ReplacementList:
        if s == Replacement[0]:
            s = s.replace(Replacement[0], Replacement[1])
    # Map a known alternate spelling (TagDict value) back to its tag key.
    # NOTE(review): players whose TagDict entry is the default '' can never
    # match here since s is a non-empty name — confirm.
    for Tag in TagDict:
        if s == TagDict[Tag]:
            s = Tag
    return s
def Addtxt(TxtFile):
    """Return TxtFile with a '.txt' extension appended if it lacks one.

    TxtFile: a string; a file name with or without the '.txt' suffix."""
    if not TxtFile.endswith('.txt'):
        TxtFile += '.txt'
    return TxtFile
def GetDataTxt(TxtFile, Dict):
    """Opens TxtFile, adds all players to Dict along with default values,
    adds TxtFile a player appears in to their Dict entry, and returns a list with the results
    of each match in TxtFile. Each match should be formatted as P1,P1wins,P2,P2wins,
    with each match separated by a carriage return (a new line).
    TxtFile: a string; the .txt file to be read.
    Dict: one of the Title dictionaries (recommended to use the TitleDict function)."""
    TxtFile = Addtxt(TxtFile)
    if 'ResultsFolder' in globals():
        TxtFile = ResultsFolder + TxtFile
    # BUG FIX: the original called .replace() on the *file object*
    # (AttributeError at runtime); the contents must be read first.
    with open(TxtFile, encoding="ISO-8859-1") as File:
        text = File.read()
    for NumFix in NumFixes:
        text = text.replace(NumFix[0], NumFix[1])
    FileResults = [split(",", line) for line in text.splitlines()]
    # Normalise each row in place: [name, wins:int, name, wins:int].
    for Result in FileResults:
        Result[0] = Replacements(Result[0])
        Result[1] = int(Result[1])
        Result[2] = Replacements(Result[2])
        Result[3] = int(Result[3])
    # Register both players of every match and tag them with this file
    # (file name without folder or '.txt').
    FileTag = path.basename(TxtFile)[0:-4]
    for CurrentMatch in FileResults:
        AddPerson(CurrentMatch[0], Dict)
        AppendFile(CurrentMatch[0], Dict, FileTag)
        AddPerson(CurrentMatch[2], Dict)
        AppendFile(CurrentMatch[2], Dict, FileTag)
    return FileResults
# Character tables used by the name/tag fix-up helpers.
letters = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
uppercases = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
# '0' through '99' as strings.
numbers = [str(i) for i in range(100)]
def WriteTxtFromChallonge(Challonge, TxtFile):
    """Writes the results from a Challonge URL to TxtFile.
    Challonge: a string; the URL for a Challonge.
    TxtFile: a string; the name of the file to be written.
    Example: WriteTxtFromChallonge('http://apex2015melee.challonge.com/singles', 'Apex 2015')"""
    TxtFile = Addtxt(TxtFile)
    # Strip Challonge UI noise, then split the page into one chunk per match.
    TheString = getHTML(Challonge)[0].replace('Mark as In Progress\n\n\nUnmark as In Progress\n\n\n\n', '') \
        .replace('\n\n\n\n\n\nEdit\n\n\nReopen', '').split('\n\n\n\nMatch Details\n\n\n\n\n\n\n')[1:]
    parsed_matches = ""
    for item in TheString:
        item = item.splitlines()
        # Skip matches missing a player name.
        if item[2] == "" or item[7] == "":
            continue
        # A negative score presumably marks a forfeit/DQ — skip those.  The
        # score field may be absent or non-numeric; in that case keep going.
        # (Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        try:
            if int(item[24]) < 0:
                continue
        except (IndexError, ValueError):
            pass
        try:
            if int(item[27]) < 0:
                continue
        except (IndexError, ValueError):
            pass
        line = item[2] + "," + item[24] + "," + item[7] + "," + item[27]
        line = SF.strip_match(line)
        if line is not None and SF.parse_match(line) != "":
            parsed_matches += SF.parse_match(line) + "\n"
    # Append so multiple events can accumulate in one file.
    with open(TxtFile, 'a') as file:
        file.write(parsed_matches)
def GetGameData(TxtFile, Dict):
    """Opens TxtFile, does all the additions to Dict from GetData and returns a dictionary whose keys are the players
    and whose key values are the individual games from each match, with the opponent's rating and RD at the time of the match,
    the result, and the tag of the opponent, just in case that is ever needed in the future. Note that each game is
    "double counted" (as it should be for Glicko2), with a game being recorded once as a victory for the winner
    and once as a loss for the loser.
    TxtFile: a string; the file to read.
    Dict: one of the Title dictionaries (recommended to use the TitleDict function)."""
    TxtFile = Addtxt(TxtFile)
    MatchResults = GetDataTxt(TxtFile, Dict)
    TitleResults = {}
    # Ensure every participant has an entry, even with zero games.
    for Match in MatchResults:
        TitleResults.setdefault(Match[0], [])
        TitleResults.setdefault(Match[2], [])
    for Match in MatchResults:
        P1, P2 = Match[0], Match[2]
        # One record per individual game, mirrored for both players.
        while Match[1] > 0:
            TitleResults[P1].append([Dict[P2][0], Dict[P2][1], 1, P2])
            TitleResults[P2].append([Dict[P1][0], Dict[P1][1], 0, P1])
            Dict[P1][3] += 1
            Dict[P2][3] += 1
            Match[1] -= 1
        while Match[3] > 0:
            TitleResults[P2].append([Dict[P1][0], Dict[P1][1], 1, P1])
            TitleResults[P1].append([Dict[P2][0], Dict[P2][1], 0, P2])
            Dict[P1][3] += 1
            Dict[P2][3] += 1
            Match[3] -= 1
    return TitleResults
def CountTitles(Person):
    """Returns the number of titles Person has participated in.
    Person: a string."""
    # A title counts only when the player has a recorded game there.
    return sum(
        1
        for Title in Titles
        if Person in TitleDict(Title) and TitleDict(Title)[Person][3] > 0
    )
def ProcessRankings(ResultFiles, Title):
    """Processes all the Files in ResultFiles, and updates the Glicko2 values of each of the players.
    ResultFiles: a list of files to be read, each of which is given as a string.
    Title: a string in Titles; the Title the results are from.
    Example: ProcessRankings(['Beast V', 'Paragon 2015', 'Apex 2015'], 'Melee')"""
    Dict = TitleDict(Title)
    TitleResults = {}
    # Merge the per-game results of every file, keyed by player.
    for File in ResultFiles:
        FileData = GetGameData(File, Dict)
        for Person in FileData:
            TitleResults.setdefault(Person, [])
            TitleResults[Person] += FileData[Person]
    for Person in Dict:
        DummyPerson = Player(rating = Dict[Person][0], rd = Dict[Person][1], vol = Dict[Person][2])
        if Person in TitleResults:
            # Game records are [opp rating, opp RD, win(0/1), opp name].
            Games = TitleResults[Person]
            OpponentRatings = [g[0] for g in Games]
            OpponentRDs = [g[1] for g in Games]
            Victories = [g[2] for g in Games]
            DummyPerson.update_player(OpponentRatings, OpponentRDs, Victories)
        else:
            # Inactive players: Glicko2 widens their rating deviation.
            DummyPerson.did_not_compete()
        # RD is capped at DefaultRD; games-played count and file list carry over.
        Dict[Person] = [DummyPerson.rating, min(DummyPerson.rd, DefaultRD),
                        DummyPerson.vol, Dict[Person][3], Dict[Person][4]]
def PadLeft(s, Len, Padding = '0'):
    """Pads s on the left with Padding until its length is at least Len.

    s: a string.
    Len: an integer.
    Padding: a non-empty string.  With a multi-character Padding the
    result may overshoot Len, because whole copies are prepended — the
    same semantics as the original one-copy-per-iteration loop, but in
    a single O(n) concatenation instead of O(n^2).
    """
    if not Padding:
        # Guard: the old while-loop spun forever on an empty Padding.
        return s
    deficit = Len - len(s)
    if deficit <= 0:
        return s
    copies = -(-deficit // len(Padding))  # ceiling division
    return Padding * copies + s
def TakeSublist(BinaryString, l):
    """Returns the sublist of l including every element of l whose corresponding value in BinaryString is 1.
    BinaryString: a string whose characters are exclusively 0's and 1's.
    l: a list."""
    # Left-pad with zeros so short masks line up with the front of l.
    BinaryString = PadLeft(BinaryString, len(l), Padding = '0')
    return [l[i] for i, bit in enumerate(BinaryString) if bit == '1']
def DigitSum(DigitString):
    """Returns the sum of the digits for DigitString.
    DigitString: a string whose characters are integers."""
    return sum(int(ch) for ch in DigitString)
def DistinctSublists(l, SublistLen):
    """Returns all sublists of l with length SublistLen.
    l: a list.
    SublistLen: an integer.

    Output order matches the original bitmask enumeration (descending
    binary masks == lexicographic order of chosen positions), but runs in
    O(C(n, k)) instead of materialising and filtering all 2**n masks.
    """
    from itertools import combinations
    return [list(combo) for combo in combinations(l, SublistLen)]
def NormalsAverage(NormalParameters):
    """Returns the "average" of the normal distributions from NormalParameters.
    NormalParameters: a list of length 2 lists, whose entries are the mean and SD of each normal distribution."""
    count = len(NormalParameters)
    means = [params[0] for params in NormalParameters]
    sds = [params[1] for params in NormalParameters]
    # Mean of the average is the mean of the means; its SD follows from
    # summing the variances and dividing by count**2.
    avg_mean = sum(means) / count
    avg_sd = (sum(sd**2 for sd in sds) / count**2) ** 0.5
    return [avg_mean, avg_sd]
def PersonAverages(Person, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Returns a list of all averages of rankings of Person for TitleMin titles, sorted as indicated.
    Person: a string.
    TitleMin: the number of titles to be included in each average.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie."""
    # Sort-key indices are loop-invariant; hoisting them also avoids a
    # NameError when no sublist qualifies below.
    sb = Sortings.index(SortedBy)
    sbt = Sortings.index(SortedByTie)
    # BUG FIX: the final tie-break index must be chosen by comparing
    # *indices*; the original compared sort-name strings against integer
    # indices, so sbl was always 0.
    sbl = [i for i in range(len(Sortings)) if i not in (sb, sbt)][0]
    Sums = []
    for Sublist in DistinctSublists(Titles, TitleMin):
        # Only combinations where the player appears in every title count.
        if all(Person in TitleDict(Title) for Title in Sublist):
            SublistAverage = NormalsAverage([[TitleDict(Title)[Person][0], TitleDict(Title)[Person][1]] for Title in Sublist])
            Sums.append([
                Sublist,
                SublistAverage[0] - 2*SublistAverage[1],  # 'Bottom' estimate
                SublistAverage[0] - SublistAverage[1],    # 'Low' estimate
                SublistAverage[0],                        # 'Middle' estimate
                sum(TitleDict(Title)[Person][3] for Title in Sublist),  # games
            ])
    # Offset by 1 because index 0 of each row holds the title sublist.
    return sorted(Sums, key = lambda x: (x[sb + 1], x[sbt + 1], x[sbl + 1]), reverse = True)
def OverallPersonDict(TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Returns a dictionary with the best averages of Titlemin titles, where "best" is determined by the sorting as indicated.
    TitleMin: the number of titles to be included in each average.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie."""
    Dict = {}
    for Title in Titles:
        for Person in TitleDict(Title):
            if CountTitles(Person) >= TitleMin:
                # PersonAverages is deterministic and expensive: call it once
                # per player (the original recomputed it inside the loop).
                best = PersonAverages(Person, TitleMin, SortedBy, SortedByTie)[0]
                # Entry layout: [Sublist, Bottom, Low, Middle, Games, Files].
                best.append([])
                for BestTitle in best[0]:
                    for File in TitleDict(BestTitle)[Person][4]:
                        best[5].append(File)
                Dict[Person] = best
    return Dict
def OverallRankingList(TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """To be used in RankingList, if the Title chosen is 'Overall'."""
    # BUG FIX: SortedByTie was silently dropped from this call.
    Dict = OverallPersonDict(TitleMin, SortedBy, SortedByTie)
    # Row layout: [Bottom, Low, Middle, Name, Tag, BestTitles, Games].
    Rankings = [[Dict[Person][1], Dict[Person][2], Dict[Person][3], Person, TagDict[Person], Dict[Person][0], Dict[Person][4]] for Person in Dict]
    # Shuffle first so exact ties land in a random order.
    shuffle(Rankings)
    sb = Sortings.index(SortedBy)
    sbt = Sortings.index(SortedByTie)
    # BUG FIX: pick the leftover sort index by comparing indices; the
    # original compared sort-name strings to integers and always chose 0.
    sbl = [i for i in range(len(Sortings)) if i not in (sb, sbt)][0]
    Rankings.sort(key = lambda x: (x[sb], x[sbt], x[sbl]), reverse = True)
    # Prepend the 1-based placing to every row.
    for i in range(len(Rankings)):
        Rankings[i] = [i + 1] + Rankings[i]
    return Rankings
def RankingList(Title, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Returns a list with the rankings of all players in Title.
    Title: a string in Titles; the Title whose rankings are to be determined.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie."""
    if Title == 'Overall':
        return OverallRankingList(TitleMin, SortedBy, SortedByTie)
    Dict = TitleDict(Title)
    # Row layout: [Bottom, Low, Middle, Name, Tag, Games].
    Rankings = [[Dict[Person][0] - 2*Dict[Person][1], Dict[Person][0] - Dict[Person][1], Dict[Person][0], Person, TagDict[Person], Dict[Person][3]] for Person in Dict]
    # Shuffle first so exact ties land in a random order.
    shuffle(Rankings)
    sb = Sortings.index(SortedBy)
    sbt = Sortings.index(SortedByTie)
    # BUG FIX: pick the leftover sort index by comparing indices; the
    # original compared sort-name strings to integers and always chose 0.
    sbl = [i for i in range(len(Sortings)) if i not in (sb, sbt)][0]
    Rankings.sort(key = lambda x: (x[sb], x[sbt], x[sbl]), reverse = True)
    # Prepend the 1-based placing to every row.
    for i in range(len(Rankings)):
        Rankings[i] = [i + 1] + Rankings[i]
    return Rankings
def ShowRankings(Title, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, TopAmount = 1000000):
    """Shows the rankings for Title for the top TopAmount players.
    Title: a string in Titles; the Title whose rankings are to be shown.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    TopAmount: the maximum number of players to be shown.
    Example: ShowRankings('Melee', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', TopAmount = 25)"""
    print(Title + '\t\t\t\t\t\t\t Estimate')
    Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie)
    if Title == 'Overall':
        # 'Overall' rows carry two extra trailing fields (best-title list,
        # games), hence Person[6]/Person[7] below instead of Person[6].
        Dict = OverallPersonDict(TitleMin, SortedBy, SortedByTie)
        print('Place\tTag\t\tName\t\t\t Bottom\t\t Low\t\t Middle\t\tGames\tBest Title' + 's'*(TitleMin > 1))
        TitleTotal = 0
        for i in range(len(Rankings)):
            if i < TopAmount:
                Person = Rankings[i]
                # The '\t'*(abs(int(...)/8)) terms pad the tag/name columns
                # out to tab stops so the numeric columns line up.
                print(str(Person[0]) + '\t' + \
                    Person[4] + '\t'*(abs(int((15 - max(len(Person[4]), 1))/8))) + '\t' + \
                    Person[5] + '\t'*(abs(int((23 - max(len(Person[5]), 1))/8))) + '\t' + \
                    PadLeft(format(Person[1], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[2], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[3], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(str(Person[7]), 5, Padding = ' ') + '\t' + \
                    str(Person[6])[1:-1].replace("'", ''))
                TitleTotal += Rankings[i][7]
                # NOTE(review): TotalSpacing is recomputed on every row and
                # is undefined when Rankings is empty — confirm acceptable.
                TotalSpacing = '\t'*11 + ' '*(5 - len(str(TitleTotal)))
    else:
        Dict = TitleDict(Title)
        print('Place\tTag\t\tName\t\t\t Bottom\t\t Low\t\t Middle\t\tGames')
        TitleTotal = 0
        for i in range(len(Rankings)):
            if i < TopAmount:
                Person = Rankings[i]
                print(str(Person[0]) + '\t' + \
                    Person[4] + '\t'*(abs(int((15 - max(len(Person[4]), 1))/8))) + '\t' + \
                    Person[5] + '\t'*(abs(int((23 - len(Person[5]))/8))) + '\t' + \
                    PadLeft(format(Person[1], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[2], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[3], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(str(Person[6]), 5, Padding = ' '))
                TitleTotal += Rankings[i][6]
                TotalSpacing = '\t'*11 + ' '*(5 - len(str(TitleTotal)))
    print('Total Games' + TotalSpacing, TitleTotal, sep = '')
def WriteTxtRankings(IncludedTitles, TxtFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines):
    """Writes a TxtFile for the titles in IncludedTitles.
    IncludedTitles: a list of string(s) in Titles; the Title(s) whose rankings are to be written.
    TxtFile: a string; the name of the file to be written.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    Example: WriteTxtRankings(['Melee', 'Sm4sh'], 'MeleeSm4shRankings', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)"""
    TxtFile = Addtxt(TxtFile)
    f = open(TxtFile, 'w')
    # Accept a single title as well as a list of titles.
    if type(IncludedTitles) != list:
        IncludedTitles = [IncludedTitles]
    for Title in IncludedTitles:
        f.write(Title + '\t\t\t\t\t\t\t Estimate\n')
        Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie)
        if Title == 'Overall':
            # 'Overall' rows carry two extra trailing fields (best-title
            # list, games), hence Person[6]/Person[7] below.
            Dict = OverallPersonDict(TitleMin, SortedBy, SortedByTie)
            f.write('Place\tTag\t\tName\t\t\t Bottom\t\t Low\t\t Middle\t\tGames\tBest Title' + 's'*(TitleMin > 1) + '\n')
            TitleTotal = 0
            for i in range(len(Rankings)):
                Person = Rankings[i]
                # The '\t'*(abs(int(...)/8)) terms pad the tag/name columns
                # out to tab stops so the numeric columns line up.
                f.write(str(Person[0]) + '\t' + \
                    Person[4] + '\t'*(abs(int((15 - max(len(Person[4]), 1))/8))) + '\t' + \
                    Person[5] + '\t'*(abs(int((23 - max(len(Person[5]), 1))/8))) + '\t' + \
                    PadLeft(format(Person[1], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[2], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[3], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(str(Person[7]), 5, Padding = ' ') + '\t' + \
                    str(Person[6])[1:-1].replace("'", '') + '\n')
                TitleTotal += Rankings[i][7]
                TotalSpacing = '\t'*11 + ' '*(5 - len(str(TitleTotal)))
        else:
            Dict = TitleDict(Title)
            f.write('Place\tTag\t\tName\t\t\t Bottom\t\t Low\t\t Middle\t\tGames\n')
            TitleTotal = 0
            for i in range(len(Rankings)):
                Person = Rankings[i]
                f.write(str(Person[0]) + '\t' + \
                    Person[4] + '\t'*(abs(int((15 - max(len(Person[4]), 1))/8))) + '\t' + \
                    Person[5] + '\t'*(abs(int((23 - len(Person[5]))/8))) + '\t' + \
                    PadLeft(format(Person[1], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[2], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(format(Person[3], Rounding), 7, Padding = ' ') + '\t' + '\t' + \
                    PadLeft(str(Person[6]), 5, Padding = ' ') + '\n')
                TitleTotal += Rankings[i][6]
                TotalSpacing = '\t'*11 + ' '*(5 - len(str(TitleTotal)))
        f.write('Total Games' + TotalSpacing + str(TitleTotal))
        if Title != (IncludedTitles)[-1]:
            f.write('\n'*(LinesBetween + 1))
    # BUG FIX: close the handle we actually wrote with; the original opened
    # a second 'r+' handle and closed that one, leaking the write handle.
    f.close()
def WriteMobileRankings(IncludedTitles, TxtFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines):
    """Writes a TxtFile for the titles in IncludedTitles, in a mobile friendly format.
    IncludedTitles: a list of string(s) in Titles; the Title(s) whose rankings are to be written.
    TxtFile: a string; the name of the file to be written.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    Example: WriteMobileRankings(['Melee', 'Sm4sh'], 'MeleeSm4shRankingsMobile', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)"""
    TxtFile = Addtxt(TxtFile)
    f = open(TxtFile, 'w')
    # Accept a single title as well as a list of titles.
    if type(IncludedTitles) != list:
        IncludedTitles = [IncludedTitles]
    f.write('Place - Tag / Name: Games Played\n(Best Title(s) if Overall)\nLow, Middle, High Estimates\n\n')
    for Title in IncludedTitles:
        f.write(Title + '\n')
        Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie)
        if Title == 'Overall':
            # 'Overall' rows carry two extra trailing fields (best-title
            # list, games), hence Person[6]/Person[7] below.
            Dict = OverallPersonDict(TitleMin, SortedBy, SortedByTie)
            TitleTotal = 0
            for i in range(len(Rankings)):
                Person = Rankings[i]
                f.write(str(Person[0]) + ' - ' + \
                    Person[4] + ' / ' + \
                    Person[5] + ': ' + \
                    str(Person[7]) + '\n' + \
                    str(Person[6])[1:-1].replace("'", '') + '\n' + \
                    format(Person[1], Rounding) + ', ' + \
                    format(Person[2], Rounding) + ', ' + \
                    format(Person[3], Rounding) + '\n\n')
                TitleTotal += Rankings[i][7]
        else:
            Dict = TitleDict(Title)
            TitleTotal = 0
            for i in range(len(Rankings)):
                Person = Rankings[i]
                f.write(str(Person[0]) + ' - ' + \
                    Person[4] + ' / ' + \
                    Person[5] + ': ' + \
                    str(Person[6]) + '\n' + \
                    format(Person[1], Rounding) + ', ' + \
                    format(Person[2], Rounding) + ', ' + \
                    format(Person[3], Rounding) + '\n\n')
                TitleTotal += Rankings[i][6]
        f.write('Total Games: ' + str(TitleTotal))
        if Title != (IncludedTitles)[-1]:
            f.write('\n'*(LinesBetween + 1))
    # BUG FIX: close the handle we actually wrote with; the original opened
    # a second 'r+' handle and closed that one, leaking the write handle.
    f.close()
def WriteCSVRankings(IncludedTitles, CSVFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines):
    """Writes a CSVFile (Excel) for the titles in IncludedTitles.
    IncludedTitles: a list of string(s) in Titles; the Title(s) whose rankings are to be written.
    CSVFile: a string; the name of the file to be written.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    Example: WriteCSVRankings(['Melee', 'Sm4sh'], 'MeleeSm4shRankings', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)"""
    if not CSVFile.endswith('.csv'):
        CSVFile = CSVFile + '.csv'
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open(CSVFile, 'w', newline='') as f:
        spamwriter = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
        # Accept a single title as well as a list of titles.
        if type(IncludedTitles) != list:
            IncludedTitles = [IncludedTitles]
        for Title in IncludedTitles:
            spamwriter.writerow([Title, '', '', '', 'Estimate'])
            Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie)
            TitleTotal = 0
            if Title == 'Overall':
                # 'Overall' rows carry two extra trailing fields
                # (best-title list, games), hence Person[6]/Person[7].
                # (Removed an unused OverallPersonDict call and dead
                # TotalSpacing computations from the original.)
                spamwriter.writerow(['Place', 'Tag', 'Name', 'Bottom', 'Low', 'Middle', 'Games', 'Best Title'+('s'*(TitleMin > 1))])
                for Person in Rankings:
                    spamwriter.writerow([Person[0], Person[4], Person[5], Person[1], Person[2], Person[3], Person[7], str(Person[6])[1:-1].replace("'", '')])
                    TitleTotal += Person[7]
            else:
                spamwriter.writerow(['Place', 'Tag', 'Name', 'Bottom', 'Low', 'Middle', 'Games'])
                for Person in Rankings:
                    spamwriter.writerow([Person[0], Person[4], Person[5], Person[1], Person[2], Person[3], Person[6]])
                    TitleTotal += Person[6]
            spamwriter.writerow(['Total Games', '', '', '', '', '', TitleTotal])
            if Title != IncludedTitles[-1]:
                for _ in range(LinesBetween):
                    spamwriter.writerow([])
def ShowTabSepRankings(Title, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, TopAmount = 1000000):
    """Shows the rankings for Title for the top TopAmount players in tab-separated format for copying to a spreadsheet.
    Title: a string in Titles; the Title whose rankings are to be shown.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    TopAmount: the maximum number of players to be shown.
    Example: ShowRankings('Melee', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', TopAmount = 25)"""
    #print(Title + '\t\t\t\t\t\t\t Estimate')
    Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie)
    TooHighVariance = []
    # NOTE(review): Dict is assigned but never used in this function.
    Dict = TitleDict(Title)
    # NOTE(review): the header names the first column 'Place', but the rows
    # below start with Person[4] (the tag) — confirm intended.  Row indexing
    # also assumes the per-title row layout, not the 'Overall' layout.
    print('Place\tBottom\tLow\tMiddle\tGames')
    TitleTotal = 0
    for i in range(len(Rankings)):
        if i < TopAmount:
            Person = Rankings[i]
            if int(Person[3]) - int(Person[2]) > VarianceCutoff:
                # High-variance players are deferred to the list at the end;
                # bumping TopAmount keeps the number of shown players intact.
                TopAmount += 1
                TooHighVariance.append(Person)
            else:
                print(Person[4] + '\t' +
                      str(format(Person[1], Rounding)) + '\t' +
                      str(format(Person[2], Rounding)) + '\t' +
                      str(format(Person[3], Rounding)) + '\t' +
                      str(Person[6]))
                TitleTotal += Rankings[i][6]
    # Database-wide totals over ALL ranked players, not just those shown.
    total = 0
    people = 0
    for i in range(len(Rankings)):
        Person = Rankings[i]
        total += int(Person[6])
        people += 1
    print("Players with high scores but also high variance:")
    print('Tag\tRating\tGames\tVariance')
    for Person in TooHighVariance:
        print(Person[4] + '\t' +
              str(format(Person[2], Rounding)) + '\t' +
              str(Person[6]) + '\t' +
              str(format(int(Person[3])-int(Person[2]), Rounding)))
    # Each game is recorded once per participant, hence the division by 2.
    print("Total games in database: " + str(int(total/2)))
    print("Total players in database: " + str(people))
def ShowAllRankings(SortedBy = DefaultSort, SortedByTie = DefaultSortTie, TitleMin = DefaultTitleMin, LinesBetween = DefaultLines, TopAmount = 1000000):
    """Runs ShowRankings for each Title in Titles, with LinesBetween lines between each.
    Example: ShowAllRankings(SortedBy = 'Low', SortedByTie = 'Middle', TitleMin = 2, LinesBetween = 2, TopAmount = 25)"""
    AllTitles = ['Overall'] + Titles
    for Index, Title in enumerate(AllTitles):
        ShowRankings(Title, TitleMin, SortedBy, SortedByTie, TopAmount)
        # Separate consecutive tables; print() itself supplies one newline.
        if Index != len(AllTitles) - 1:
            print('\n'*(LinesBetween - 1))
def WriteTxtAllRankings(TxtFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines):
    """Runs WriteTxtRankings for each Title in Titles, with LinesBetween lines between each.
    Example: WriteTxtAllRankings('Rankings', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)"""
    # 'Overall' is prepended so the combined ranking is written first.
    WriteTxtRankings(['Overall'] + Titles, TxtFile, TitleMin, SortedBy, SortedByTie, LinesBetween)
def WriteMobileAllRankings(TxtFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines):
    """Runs WriteMobileRankings for each Title in Titles, with LinesBetween lines between each.
    Example: WriteMobileAllRankings('RankingsMobile', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)"""
    # 'Overall' is prepended so the combined ranking is written first.
    WriteMobileRankings(['Overall'] + Titles, TxtFile, TitleMin, SortedBy, SortedByTie, LinesBetween)
def WriteCSVAllRankings(CSVFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines):
    """Runs WriteCSVRankings for each Title in Titles, with LinesBetween lines between each.
    Example: WriteCSVAllRankings('Rankings', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)"""
    # 'Overall' is prepended so the combined ranking is written first.
    WriteCSVRankings(['Overall'] + Titles, CSVFile, TitleMin, SortedBy, SortedByTie, LinesBetween)
def ToTag(Person):
    """Returns the tag of Person.
    Person: a string."""
    # Anything that is not already a known tag is treated as a real name and
    # translated through NameDict (KeyError if it is unknown there too).
    return Person if Person in TagDict else NameDict[Person]
def SingleRanking(Person, Title, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Returns the ranking info of Person in Title, where their rank within title is determined with the sorting as indicated.
    Person: a string.
    Title: a string in Titles; the Title whose rankings are to be determined.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie."""
    Tag = ToTag(Person)
    Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie)
    # Column 4 of every ranking row is the player's tag; list.index raises
    # ValueError if the player does not appear in this title's rankings.
    Tags = [Row[4] for Row in Rankings]
    return Rankings[Tags.index(Tag)]
def PersonRankings(Person, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Prints all the ranking info of each title for Person, as well as which files they appear in.
    Person: a string.
    TitleMin: the number of titles if the Title is 'Overall'.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    Example: PersonRankings('Mew2King', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle')"""
    Person = ToTag(Person)
    OverallDict = OverallPersonDict(TitleMin, SortedBy, SortedByTie)
    print('\t\t\t\t Estimate')
    # The 'Best titles' column only applies when the player qualifies for the
    # Overall ranking (present in OverallDict with at least TitleMin titles).
    print('Title\tRank\t\t Bottom\t\t Low\t\t Middle\t\tGames' + '\tBest titles'*(Person in OverallDict and CountTitles(Person) >= TitleMin))
    if Person in OverallDict and CountTitles(Person) >= TitleMin:
        Ranking = SingleRanking(Person, 'Overall', TitleMin, SortedBy, SortedByTie)
        # The "'\t'*abs(int(...))" expression adds an extra tab when the
        # "N of M" string is short, keeping the columns aligned.
        print(\
            'Overall', \
            str(Ranking[0]) + ' of ' + str(len(OverallDict)) + \
            '\t'*(abs(int((15 - max(len(str(Ranking[0]) + ' of ' + str(len(OverallDict))), 1))/8))), \
            PadLeft(format(Ranking[1], Rounding), 7, Padding = ' ') + '\t', \
            PadLeft(format(Ranking[2], Rounding), 7, Padding = ' ') + '\t', \
            PadLeft(format(Ranking[3], Rounding), 7, Padding = ' ') + '\t', \
            PadLeft(str(Ranking[7]), 5, Padding = ' '), \
            str(Ranking[6])[1:-1].replace("'", ''), \
            sep = '\t'
            )
    for Title in Titles:
        if Person in TitleDict(Title):
            Dict = TitleDict(Title)
            Ranking = SingleRanking(Person, Title, TitleMin, SortedBy, SortedByTie)
            # Same layout as the Overall row, minus the 'Best titles' column.
            print(\
                Title, \
                str(Ranking[0]) + ' of ' + str(len(Dict)) + '\t'*(abs(int((15 - max(len(str(Ranking[0]) + ' of ' + str(len(Dict))), 1))/8))), \
                PadLeft(format(Ranking[1], Rounding), 7, Padding = ' ') + '\t', \
                PadLeft(format(Ranking[2], Rounding), 7, Padding = ' ') + '\t', \
                PadLeft(format(Ranking[3], Rounding), 7, Padding = ' ') + '\t', \
                PadLeft(str(Ranking[6]), 5, Padding = ' '),
                sep = '\t'\
                )
    print(Person + ' appears in the following files: ')
    for File in FileList(Person):
        print(File)
def AllNames(PersonList):
    """Returns True if every Person in PersonList has a name in TagDict.
    PersonList: the tags to be checked."""
    # all() short-circuits on the first tag that is missing from TagDict or
    # has an empty name; the previous loop kept AND-ing a flag over the whole
    # list even after it was already False.
    return all(Person in TagDict and TagDict[Person] != '' for Person in PersonList)
def FileList(Person):
    """Returns the files (with repeats) in which Person appears, across every Title."""
    # Entry [4] of a player's record holds the files that contributed to it.
    return [File
            for Title in Titles
            if Person in TitleDict(Title)
            for File in TitleDict(Title)[Person][4]]
def NoNames(PersonList = None, Files = True):
    """Prints the names of all tags with no name, as well as which files they appear in, if indicated.
    PersonList: the tags to be checked. If left as the default, all tags that have been processed will be checked.
    Files: whether to also list the files each unnamed tag appears in.
    Example: NoNames(['Mew2King', 'Axe', 'RandomPerson420'], Files = True)"""
    # The default used to be the mutable literal [], which this function
    # appends to in place -- every default call after the first reused the
    # accumulated list. None restores the intended "check everything" default.
    if PersonList is None:
        PersonList = []
    if PersonList == []:
        # No explicit list given: check every tag that has been processed.
        for Title in Titles:
            for Person in TitleDict(Title):
                PersonList.append(Person)
    if not AllNames(PersonList):
        print('The following tags do not have names in TagDict:')
        PersonList = set(PersonList)
        for Person in PersonList:
            if Person not in TagDict:
                # Register unknown tags with an empty name so they are reported below.
                TagDict[Person] = ''
            if TagDict[Person]=='':
                print(Person + (', who appears in the following: ' + ', '.join(FileList(Person)))*(len(FileList(Person)) >= 1)*Files)
    else:
        print('Everyone has a name in TagDict. Hooray!')
def NestedQ(l):
    """Returns True if every item in List is a list or a tuple.
    l: a list."""
    # The set of element types must be exactly {list} or exactly {tuple};
    # mixed lists/tuples and empty input both yield False.
    ItemTypes = {type(item) for item in l}
    return ItemTypes == {list} or ItemTypes == {tuple}
def FlattenShallowList(ShallowList):
    """Flattens a shallow list.
    ShallowList: a list of depth at most two. Non-nested input is returned unchanged."""
    if not NestedQ(ShallowList):
        return ShallowList
    # A comprehension concatenates sublists of any sequence type. The previous
    # list(sum(ShallowList, ())) raised TypeError for a list of *lists*, since
    # a list cannot be added to the () start value; only tuples worked.
    return [item for sublist in ShallowList for item in sublist]
def DeleteDuplicates(l):
    """Deletes duplicate objects in List, where tuples and lists in nested lists are treated as unordered.
    l: a list."""
    unique = []
    if NestedQ(l):
        # Compare sublists by their sorted contents, so that e.g. ('A', 'B')
        # and ('B', 'A') count as the same team. Track seen keys separately:
        # the previous version stored sorted *tuples* in the output but
        # looked up sorted *lists*, so duplicate tuples were never detected.
        seen = []
        for item in l:
            key = sorted(item)
            if key not in seen:
                seen.append(key)
                # Preserve the original container type in the result.
                if type(item) == tuple:
                    unique.append(tuple(key))
                else:
                    unique.append(key)
    else:
        for item in l:
            if item not in unique:
                unique.append(item)
    return unique
def EveryoneCompeted(PersonList, Title):
    """Returns True if everyone in PersonList has competed in Title previously.
    PersonList: a list of strings.
    Title: a string in Titles."""
    Flattened = FlattenShallowList(PersonList)
    # Work on a copy of the title's dict so AddPerson's default entries do not
    # leak into the real data.
    Working = {Person: TitleDict(Title)[Person] for Person in TitleDict(Title)}
    Competed = True
    for Person in Flattened:
        AddPerson(Person, Working)
        # Index 3 of a player's record is their games-played count.
        if not Working[Person][3] > 0:
            Competed = False
    return Competed
def YetToCompete(PersonList, Title):
    """Prints the Persons in PersonList who have not yet competed in Title.
    PersonList: a list of strings.
    Title: a string in Titles.
    Example: YetToCompete(['Mew2King', 'Axe', 'RandomPerson420'], 'Melee')"""
    Players = DeleteDuplicates(FlattenShallowList(PersonList))
    # Copy of the title's dict, so AddPerson's default entries stay local.
    Working = {Person: TitleDict(Title)[Person] for Person in TitleDict(Title)}
    if EveryoneCompeted(Players, Title):
        print('Everyone has competed previously in ' + Title + '. Hooray!')
        return
    print('The following players have not competed previously in ' + Title + ':')
    for Person in Players:
        AddPerson(Person, Working)
        if Working[Person][3] == 0:
            print(Person)
def Seeding(EntrantList, Title, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Returns EntrantList sorted by the seeding for Title, with seeding determined as indicated,
    with complete ties seeded randomly. Those who have not yet competed are given default rating values.
    EntrantList: a list of strings (or a list of tuples whose values are strings, for teams).
    Title: a string in Titles.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie."""
    EntrantList = DeleteDuplicates([Replacements(Entrant) for Entrant in EntrantList])
    # Shuffle first so that complete ties end up in random order after the
    # stable sort below.
    shuffle(EntrantList)
    Dict = {Person:TitleDict(Title)[Person] for Person in TitleDict(Title)}
    SeedList = []
    Singles = not NestedQ(EntrantList)
    if Singles:
        for Entrant in EntrantList:
            AddPerson(Entrant, Dict)
            EntrantInfo = Dict[Entrant]
            # Row layout: [entrant, Bottom (mean - 2*sd), Low (mean - sd), info].
            SeedList.append([Entrant, EntrantInfo[0] - 2*EntrantInfo[1], EntrantInfo[0] - EntrantInfo[1], EntrantInfo])
    if not Singles:
        for Team in EntrantList:
            for Person in Team:
                AddPerson(Person, Dict)
            TeamInfoStart = [Dict[Person] for Person in Team]
            # A team's rating is the average of its members' distributions.
            TeamInfo = NormalsAverage([[PersonInfo[0], PersonInfo[1]] for PersonInfo in TeamInfoStart])
            SeedList.append([Team, TeamInfo[0] - 2*TeamInfo[1], TeamInfo[0] - TeamInfo[1], TeamInfo])
    sb = Sortings.index(SortedBy)
    sbt = Sortings.index(SortedByTie)
    # The last tie-breaker is whichever sorting was not picked as primary or
    # secondary. The previous expression compared the *names* in Sortings
    # against the indices sb/sbt (strings vs ints, never equal), so the last
    # tie-breaker silently defaulted to column 0 in every case.
    sbl = [i for i in range(len(Sortings)) if i not in [sb, sbt]][0]
    SeedList.sort(key = lambda x: (x[sb + 1], x[sbt + 1], x[sbl + 1]), reverse = True)
    return [Seed[0] for Seed in SeedList]
def ShowSeeding(EntrantList, Title, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, Nums = False):
    """Prints the seeds for EntrantList, with seeding determined as indicated, for Title,
    printing the numbers of the seeds if indicated.
    EntrantList: a list of strings (or a list of tuples whose values are strings, for teams).
    Title: a string in Titles.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    Nums: whether the numbers of the seeds shall be shown.
    Example (Singles):
    ShowSeeding([\
        'Armada',
        'PPMD',
        'Leffen',
        'Hungrybox',
        'MaNg0',
        'Hax',
        'Mew2King',
        'Silent Wolf',
        'aMSa',
        'Ice',
        'Westballz',
        'Shroomed',
        'Colbol',
        'Axe',
        'Lucky',
        'Javi',
        'Fiction',
        'Cactuar',
        'Kalamazhu',
        'Wizzrobe',
        'Kels',
        'SFAT',
        'The Moon',
        'Weon-X',
        'Plup'],
        'Melee', SortedBy = 'Low', SortedByTie = 'Middle', Nums = False)
    Example (Doubles):
    ShowSeeding([\
        ('Armada', 'PPMD'),
        ('Leffen', 'Hungrybox'),
        ('MaNg0', 'Hax'),
        ('Mew2King', 'Silent Wolf'),
        ('aMSa', 'Ice'),
        ('Westballz','Shroomed'),
        ('Colbol','Axe'),
        ('Lucky','Javi'),
        ('Fiction','Cactuar'),
        ('Kalamazhu','Wizzrobe'),
        ('Kels','SFAT'),
        ('The Moon','Weon-X')],
        'Melee', SortedBy = 'Low', SortedByTie = 'Middle', Nums = False)"""
    EntrantList = Seeding(EntrantList, Title, SortedBy, SortedByTie)
    # Warn about tags without names and entrants with no prior results first.
    NoNames(FlattenShallowList(EntrantList), Files = False)
    print()
    YetToCompete(FlattenShallowList(EntrantList), Title)
    print()
    Singles = not NestedQ(EntrantList)
    if Singles:
        for i in range(len(EntrantList)):
            # The "(...)*Nums" prefix collapses to '' when Nums is False.
            print((str(i + 1) + ':' + ' '*(1 + len(str(len(EntrantList))) - len(str(i + 1))))*Nums, EntrantList[i], sep = '')
    if not Singles:
        for i in range(len(EntrantList)):
            print((str(i + 1) + ':' + ' '*(1 + len(str(len(EntrantList))) - len(str(i + 1))))*Nums, ', '.join([Player for Player in EntrantList[i]]), sep = '')
def Pooling(EntrantList, Title, PoolSize = 5, Seeded = False, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Returns EntrantList divided into pools with PoolSize persons in each
    by the seeding for Title, with seeding determined as indicated, with complete ties seeded randomly.
    Those who have not yet competed are given default rating values.
    EntrantList: a list of strings (or a list of tuples whose values are strings, for teams).
    PoolSize: the number to be in each pool.
    Title: a string in Titles.
    Seeded: whether EntrantList is already seeded or not.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie."""
    EntrantList = DeleteDuplicates([Replacements(Entrant) for Entrant in EntrantList])
    if Seeded == False:
        EntrantList = Seeding(EntrantList, Title, SortedBy, SortedByTie)
    PoolsNum = ceil(len(EntrantList)/PoolSize)
    Pools = [[] for i in range(PoolsNum)]
    # Seeds are dealt out "snake" style so pools end up balanced:
    # first pass runs pool 1 -> N, second pass runs pool N -> 1, and so on.
    for i in range(PoolsNum):
        if EntrantList != []:
            Pools[i] += [EntrantList[0]]
            EntrantList = EntrantList[1:]
        else:
            pass
    for i in range(PoolsNum):
        if EntrantList != []:
            Pools[-i - 1] += [EntrantList[0]]
            EntrantList = EntrantList[1:]
        else:
            pass
    # While two full passes remain, keep alternating direction
    # (reverse pass first, then forward, mirroring the opening two passes).
    while len(EntrantList) >= 2*PoolsNum:
        for i in range(PoolsNum):
            if EntrantList != []:
                Pools[-i - 1] += [EntrantList[0]]
                EntrantList = EntrantList[1:]
            else:
                pass
        for i in range(PoolsNum):
            if EntrantList != []:
                Pools[i] += [EntrantList[0]]
                EntrantList = EntrantList[1:]
            else:
                pass
    # Whatever is left (fewer than two passes' worth) is dealt in reverse order.
    while EntrantList != []:
        for i in range(PoolsNum):
            if EntrantList != []:
                Pools[-i - 1] += [EntrantList[0]]
                EntrantList = EntrantList[1:]
            else:
                pass
    return Pools
def ShowPoolingInner(EntrantList, Title, PoolSize = 5, Seeded = False, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """To be used in ShowPooling and ShowPoolingFromPools."""
    PoolList = Pooling(EntrantList, Title, PoolSize, Seeded, SortedBy, SortedByTie)
    # Teams (nested input) print as comma-joined member lists; singles print
    # one name per line. The trailing ' '*(...) pads the header for alignment.
    if NestedQ(EntrantList):
        for i in range(len(PoolList)):
            print('Pool ' + str(i + 1) + ' (' + str(len(PoolList[i])) + ' teams)' + \
                  ' '*(len(str(len(PoolList))) - len(str(i + 1)) + len(str(PoolSize)) - len(str(len(PoolList[i])))))
            print(*[', '.join([PoolList[i][a][j] for j in range(len(PoolList[i][a]))]) for a in range(len(PoolList[i]))], sep = '\n')
            if i!= len(PoolList) - 1:
                print()
    else:
        for i in range(len(PoolList)):
            print('Pool ' + str(i + 1) + ' (' + str(len(PoolList[i])) + ' players)' + \
                  ' '*(len(str(len(PoolList))) - len(str(i + 1)) + len(str(PoolSize)) - len(str(len(PoolList[i])))))
            print(*PoolList[i], sep = '\n')
            print()
    print('Here is the PoolList in raw form. Save this to be used with WriteTxtFromPools.')
    print(PoolList)
def ShowPooling(EntrantList, Title, PoolSize = 5, Seeded = False, SortedBy = DefaultSort, SortedByTie = DefaultSortTie):
    """Prints the pools with PoolSize persons in each for EntrantList, with seeding determined as indicated, for Title.
    EntrantList: a list of strings (or a list of tuples whose values are strings, for teams).
    Title: a string in Titles.
    PoolSize: the number to be in each pool.
    Seeded: whether EntrantList is already seeded or not.
    SortedBy: a string in Sortings; the primary method of sorting.
    SortedByTie: a string in Sortings; the method of sorting in the event of a tie.
    Example (Singles):
    ShowPooling([\
        'Armada',
        'PPMD',
        'Leffen',
        'Hungrybox',
        'MaNg0',
        'Hax',
        'Mew2King',
        'Silent Wolf',
        'aMSa',
        'Ice',
        'Westballz',
        'Shroomed',
        'Colbol',
        'Axe',
        'Lucky',
        'Javi',
        'Fiction',
        'Cactuar',
        'Kalamazhu',
        'Wizzrobe',
        'Kels',
        'SFAT',
        'The Moon',
        'Weon-X',
        'Plup'],
        'Melee', PoolSize = 5, Seeded = False, SortedBy = 'Low', SortedByTie = 'Middle')
    Example (Doubles):
    ShowPooling([\
        ('Armada', 'PPMD'),
        ('Leffen', 'Hungrybox'),
        ('MaNg0', 'Hax'),
        ('Mew2King', 'Silent Wolf'),
        ('aMSa', 'Ice'),
        ('Westballz','Shroomed'),
        ('Colbol','Axe'),
        ('Lucky','Javi'),
        ('Fiction','Cactuar'),
        ('Kalamazhu','Wizzrobe'),
        ('Kels','SFAT'),
        ('The Moon','Weon-X')],
        'Melee', PoolSize = 5, Seeded = False, SortedBy = 'Low', SortedByTie = 'Middle')"""
    EntrantList = DeleteDuplicates([Replacements(Entrant) for Entrant in EntrantList])
    # Warn about tags without names and entrants with no prior results first.
    NoNames(FlattenShallowList(EntrantList), Files = False)
    print()
    YetToCompete(FlattenShallowList(EntrantList), Title)
    print()
    ShowPoolingInner(EntrantList, Title, PoolSize, Seeded, SortedBy, SortedByTie)
def SeedingFromPools(PoolQualifiers):
    """Returns the persons in PoolQualifiers sorted by seeding as determined by their place in the pool.
    PoolQualifiers: A list of lists of qualifiers from pools, where the sublists are in order from strongest
    to weakest pool, and each sublist is ordered from top to bottom qualifier."""
    SeedList = []
    Longest = max(len(Pool) for Pool in PoolQualifiers)
    for Place in range(Longest):
        # Snake through the pools: (Place % 2 == 0) ^ (Place > 1) selects the
        # forward pool order for place 0, the reversed order for places 1 and
        # 2, and alternates from there, so adjacent seeds come from pools of
        # differing strength.
        Forward = (Place % 2 == 0) ^ (Place > 1)
        Ordered = PoolQualifiers if Forward else PoolQualifiers[::-1]
        for Pool in Ordered:
            if Place < len(Pool):
                SeedList.append(Pool[Place])
    return SeedList
def ShowSeedingFromPools(PoolQualifiers, Nums = False):
    """Prints the seeds for the persons in PoolQualifiers, printing the numbers of the seeds if indicated.
    PoolQualifiers: A list of lists of qualifiers from pools, where the sublists are in order from strongest
    to weakest pool, and each sublist is ordered from top to bottom qualifier.
    Nums: whether the numbers of the seeds shall be shown.
    Example (Singles): ShowSeedingFromPools([['Armada', 'Ice'], ['PPMD', 'aMSa'], ['Leffen', 'Silent Wolf'], ['Hungrybox', 'Mew2King'], ['MaNg0', 'Hax']], Nums = False)
    Example (Doubles):
    ShowSeedingFromPools(\
        [[('Armada', 'PPMD'), ('Shroomed', 'Westballz')],
        [('Hungrybox', 'Leffen'), ('Ice', 'aMSa')],
        [('Hax', 'MaNg0'), ('Mew2King', 'Silent Wolf')]],
        Nums = False)"""
    Seeds = SeedingFromPools(PoolQualifiers)
    # Tuples mean teams, which print as comma-joined member lists.
    Teams = type(Seeds[0]) == tuple
    for Index in range(len(Seeds)):
        Prefix = ''
        if Nums:
            # Pad after the colon so single- and double-digit seeds align.
            Prefix = str(Index + 1) + ':' + ' '*(1 + len(str(len(Seeds))) - len(str(Index + 1)))
        if Teams:
            print(Prefix, ', '.join([Player for Player in Seeds[Index]]), sep = '')
        else:
            print(Prefix, Seeds[Index], sep = '')
def ShowPoolingFromPools(PoolQualifiers, PoolSize = 5):
    """For use in tournnaments with multiple rounds of pool. Prints pools for the persons in PoolQualifiers.
    PoolQualifiers: A list of lists of qualifiers from pools, where the sublists are in order from strongest
    to weakest pool, and each sublist is ordered from top to bottom qualifier.
    PoolSize: the number to be in each new pool.
    Example (Singles): ShowPoolingFromPools([['Armada', 'Ice'], ['PPMD', 'aMSa'], ['Leffen', 'Silent Wolf'], ['Hungrybox', 'Mew2King'], ['MaNg0', 'Hax']], PoolSize = 5)
    Example (Doubles):
    ShowPoolingFromPools(\
        [[('Armada', 'PPMD'), ('Shroomed', 'Westballz')],
        [('Hungrybox', 'Leffen'), ('Ice', 'aMSa')],
        [('Hax', 'MaNg0'), ('Mew2King', 'Silent Wolf')]],
        PoolSize = 5)"""
    Seeded = SeedingFromPools(PoolQualifiers)
    Entrants = DeleteDuplicates([Replacements(Entrant) for Entrant in Seeded])
    # With Seeded=True the Title argument is never consulted for ratings, so
    # 'SSB' is just a placeholder.
    ShowPoolingInner(Entrants, Title = 'SSB', PoolSize = PoolSize, Seeded = True)
def WriteTxtFromPools(PoolList, TxtFile):
    """Prompts you to enter the results for matches from pools, then writes those results to a .txt file.
    PoolList: a list of lists of strings. Each sublist is a pool, with each string in the sublist being one of the players in the pool.
    TxtFile: a string; the name of the file to be written.
    Example:
    WriteTxtFromPools(\
        [['Armada', 'Ice', 'Lucky', 'Javi', 'Plup'],
        ['PPMD', 'aMSa', 'Axe', 'Fiction', 'Weon-X'],
        ['Leffen', 'Silent Wolf', 'Colbol', 'Cactuar', 'The Moon'],
        ['Hungrybox', 'Mew2King', 'Shroomed', 'Kalamazhu', 'SFAT'],
        ['MaNg0', 'Hax', 'Westballz', 'Wizzrobe', 'Kels']],
        'PoolResults')"""
    print("Remember, enter matches with DQ'ed person(s) as 0 wins for both players.")
    print()
    TxtFile = Addtxt(TxtFile)
    # 'with' guarantees the file is flushed and closed even if input() is
    # interrupted; the previous version reopened the file in 'r+' mode at the
    # end without ever closing the original write handle.
    with open(TxtFile, 'w') as f:
        for i in range(len(PoolList)):
            Pool = PoolList[i]
            print('Pool ' + str(i + 1) + ':')
            PairList = DistinctSublists(Pool, 2)
            for Pair in PairList:
                print('Match: ' + Pair[0] + ' vs. ' + Pair[1])
                P1Wins = input('How many games did ' + Pair[0] + ' win? ')
                P2Wins = input('How many games did ' + Pair[1] + ' win? ')
                print()
                f.write(Pair[0] + ' ' + P1Wins + ' ' + Pair[1] + ' ' + P2Wins)
                # Newline after every match except the very last one in the file.
                if Pair != PairList[-1] or Pool != PoolList[-1]:
                    f.write('\n')
            if i != len(PoolList) - 1:
                print()
def FunctionInfo(f):
    """Prints the header for f: its name, signature, and (indented) docstring.
    f: a function.
    Example: FunctionInfo(WriteTxtFromChallonge)"""
    print("{}{}".format(f.__name__, signature(f)))
    docstring = getdoc(f)
    # isinstance/is None replace the non-idiomatic type(...)== / == None tests.
    if isinstance(docstring, str):
        # Indent continuation lines so the docstring nests under the signature.
        docstring = docstring.replace('\n', '\n\t')
    if docstring is None:
        docstring = 'No docstring'
    print('\t', docstring, sep = '')
# Registry of the user-facing entry points, in rough workflow order
# (enter results -> process -> display/write rankings -> seed/pool).
UsefulFunctionList = [WriteTxtFromChallonge, WriteTxtFromPools, ProcessRankings, ShowRankings, ShowAllRankings, \
    WriteTxtRankings, WriteMobileRankings, WriteCSVRankings, WriteTxtAllRankings, WriteMobileAllRankings, WriteCSVAllRankings, PersonRankings, \
    NoNames, YetToCompete, ShowSeeding, ShowPooling, ShowSeedingFromPools, ShowPoolingFromPools, FunctionInfo]
def UsefulFunctionsListed():
    """Prints the names of all useful functions."""
    # One function name per line, same as looping with individual print calls.
    print(*[Function.__name__ for Function in UsefulFunctionList], sep = '\n')
def UsefulFunctions():
    """Prints a list of useful functions as well as their docstrings."""
    print("Titles = ['Melee', 'PM', 'Sm4sh', 'SSB', 'Brawl'] and Sortings = ['Bottom', 'Low', 'Middle'].")
    print()
    Last = UsefulFunctionList[-1]
    for Function in UsefulFunctionList:
        FunctionInfo(Function)
        # Blank line between entries, but not after the final one.
        if Function != Last:
            print()
| {
"repo_name": "avnestico/smash_glicko2_ratings",
"path": "RankingFunctions.py",
"copies": "1",
"size": "52613",
"license": "mit",
"hash": -383307278361958300,
"line_mean": 46.5705244123,
"line_max": 186,
"alpha_frac": 0.6247125235,
"autogenerated": false,
"ratio": 3.379343567345366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9467327655340143,
"avg_score": 0.007345687101044665,
"num_lines": 1106
} |
__all__ = ["need_migrate", "run"]
from peewee import Model, IntegerField, DateTimeField
import logging
import os
from glob import glob
import re
from datetime import datetime
import importlib
import sys
logger = logging.getLogger("peewee-simple-migrate")
class MigrationError(Exception):
    """Raised when the recorded database version is unknown locally, or when
    the required initialize.py script is missing."""
    pass
def generate_model(db, table_name=None):
    """Build the bookkeeping model bound to `db`.

    The returned peewee model stores the current schema version and the time
    of the latest migration. `table_name` defaults to "migration".
    """
    class Migration(Model):
        """This model itself can't be migrated, so don't change its structure unless necessary."""
        version = IntegerField(primary_key=True)
        latest_migrate = DateTimeField(null=False)
        class Meta:
            database = db
            db_table = table_name or "migration"
    return Migration
def get_versions(migration_dir):
    """Return the sorted list of schema versions available in `migration_dir`.

    Version 0 is always included: it represents the initial version of the
    data structure and has no ver_xxx.py file of its own.
    """
    pattern = os.path.join(migration_dir, "ver_[0-9]*.py")
    found = [int(re.search(r"ver_(\d+)\.py$", path).group(1))
             for path in glob(pattern)]
    return sorted([0] + found)
def execute_migrate_code(migration_dir, module_name, db):
    """Import `module_name` from `migration_dir` and call its `run(db)` hook.

    The directory is temporarily prepended to sys.path; a try/finally now
    guarantees it is removed again even when the migration raises (the old
    version left sys.path polluted on failure, and blindly dropped whatever
    happened to be first).
    """
    path_entry = os.path.abspath(migration_dir)
    sys.path.insert(0, path_entry)
    try:
        module = importlib.import_module(module_name)
        module.run(db)
    finally:
        try:
            sys.path.remove(path_entry)
        except ValueError:
            # Already removed (e.g. by the migration code itself); ignore.
            pass
def prepare(db, migration_dir, table_name=None):
    """Inspect the database and migration directory.

    Returns [need_migration, current_version, Migration, versions], where
    `current_version` is None when the migration table does not exist yet
    (i.e. the database still needs initializing).
    Raises MigrationError when the recorded version has no local script.
    """
    Migration = generate_model(db, table_name)
    versions = get_versions(migration_dir)
    need_migration = False
    current_version = None  # None means it needs initialize
    if Migration.table_exists():
        # NOTE(review): assumes the table holds exactly one row -- confirm.
        current_version = Migration.select().get().version
        if current_version not in versions:
            raise MigrationError("version '{}' not found in local".format(current_version))
        if current_version != versions[-1]:
            need_migration = True
    else:
        need_migration = True
    return [need_migration, current_version, Migration, versions]
def need_migrate(db, migration_dir, table_name=None):
    """return: bool"""
    # prepare() returns [need_migration, current_version, Migration, versions];
    # only the first flag matters here.
    needed, _version, _model, _versions = prepare(db, migration_dir, table_name)
    return needed
def run(db, migration_dir, check_only=False, table_name=None):
    """Bring the database up to the latest version found in `migration_dir`.

    A fresh database (no migration table) is initialized via initialize.py;
    an existing one has every ver_N.py with N greater than the current version
    applied in ascending order. Each path runs inside a single transaction.
    NOTE(review): the check_only parameter is currently unused -- confirm
    whether it was meant to short-circuit before applying migrations.
    """
    [need_migration, current_version, Migration, versions] = prepare(db, migration_dir, table_name)
    if not need_migration:
        logger.debug("Already latest version {}, doesn't need migrate.".format(current_version))
        return
    if current_version is None:
        # Fresh database: run initialize.py, then record the newest version.
        if os.path.exists(os.path.join(migration_dir, "initialize.py")):
            with db.transaction():
                execute_migrate_code(migration_dir, "initialize", db)
                db.create_tables([Migration], safe=True)
                Migration.create(version=versions[-1] if len(versions) > 0 else 0,
                                 latest_migrate=datetime.now())
                logger.info("initialize complete, version {}.".format(versions[-1]))
        else:
            raise MigrationError("initialize.py not found")
    else:
        # Existing database: apply every newer migration script in order,
        # then stamp the final version.
        with db.transaction():
            for version in versions:
                if version > current_version:
                    module_name = "ver_{}".format(version)
                    execute_migrate_code(migration_dir, module_name, db)
            query = Migration.update(version=versions[-1], latest_migrate=datetime.now())
            query.execute()
            logger.info("from version {} to {}, migrate complete.".format(current_version, versions[-1]))
| {
"repo_name": "anjianshi/peewee-simple-migrate",
"path": "peewee_simple_migrate.py",
"copies": "1",
"size": "3488",
"license": "mit",
"hash": 4810804351007812000,
"line_mean": 31.2962962963,
"line_max": 105,
"alpha_frac": 0.6416284404,
"autogenerated": false,
"ratio": 4.074766355140187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5216394795540187,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'ElementMatcher',
'TagElementMatcher',
'NoAttributesTagElementMatcher',
'AttributeSubstringTagElementMatcher',
]
class ElementMatcher(object):
    """Abstract base class for element matchers"""
    def doesMatch(self, element):
        # Subclasses decide whether the given element matches.
        raise NotImplementedError
class TagElementMatcher(ElementMatcher):
    """Matches elements whose tag is (case-insensitively) one of `tags`."""
    def __init__(self, tags):
        # A bare string would be iterated character by character; require a
        # real collection of tag names. (basestring: this module is Python 2.)
        if isinstance(tags, basestring):
            raise TypeError('The tags parameter must be a collection of strings.')
        self.tags = frozenset(tag.lower() for tag in tags)
    def doesMatch(self, element):
        return element.tag.lower() in self.tags
class NoAttributesTagElementMatcher(TagElementMatcher):
    """Like TagElementMatcher, but only matches elements carrying no attributes."""
    def doesMatch(self, element):
        # Short-circuit on attributes first, exactly as the conjunction did.
        if element.attrib:
            return False
        return super(NoAttributesTagElementMatcher, self).doesMatch(element)
class AttributeSubstringTagElementMatcher(TagElementMatcher):
    """Matches elements by tag and by substrings of selected attribute values.

    An element matches when its tag matches (per TagElementMatcher) and at
    least one configured attribute is present whose (lower-cased) value
    contains every substring in `all_substrings`, none of
    `disallowed_substrings`, and -- if `any_substrings` is non-empty -- at
    least one of those.
    """
    def __init__(self, tags, attributes, all_substrings=None, any_substrings=None, disallowed_substrings=None):
        super(AttributeSubstringTagElementMatcher, self).__init__(tags)
        # All comparisons are case-insensitive, hence the lower-casing here.
        self.attributes = frozenset(attr.lower() for attr in attributes)
        self.all_substrings = frozenset(substr.lower() for substr in (all_substrings if all_substrings is not None else []))
        self.any_substrings = frozenset(substr.lower() for substr in (any_substrings if any_substrings is not None else []))
        self.disallowed_substrings = frozenset(substr.lower() for substr in (disallowed_substrings if disallowed_substrings is not None else []))
    def doesMatch(self, element):
        if not super(AttributeSubstringTagElementMatcher, self).doesMatch(element):
            return False
        # Only attributes that are both configured and present are examined.
        for attr in self.attributes.intersection(element.attrib):
            attr_value = element.attrib[attr].lower()
            if (
                all(ss in attr_value for ss in self.all_substrings) and
                not any(ss in attr_value for ss in self.disallowed_substrings) and
                (any(ss in attr_value for ss in self.any_substrings) if self.any_substrings else True)
            ):
                return True
        return False
| {
"repo_name": "taleinat/searchtml",
"path": "searchtml/matchers.py",
"copies": "1",
"size": "2182",
"license": "mit",
"hash": -2630215420643187700,
"line_mean": 40.9615384615,
"line_max": 145,
"alpha_frac": 0.6796516957,
"autogenerated": false,
"ratio": 4.245136186770428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424787882470428,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'Engine',
'SimpleEngine',
'RandomEngine',
)
import abc
import random
class Engine(metaclass=abc.ABCMeta):
    """Abstract driver that repeatedly fires enabled transitions of a net.

    Subclasses implement firing_transitions() to choose which
    (transition, substitution) pairs fire on each step.
    """
    def __init__(self, net):
        self._net = net
        # Register with the net so it can invoke the notify_* hooks.
        self._net.add_engine(self)
        self._timestep = 0

    @property
    def net(self):
        return self._net

    @property
    def timestep(self):
        # Number of completed steps in which at least one transition fired.
        return self._timestep

    def run(self):
        """Step repeatedly until a step fires no transitions."""
        while True:
            num_fired = self.step()
            if not num_fired:
                break

    def step(self):
        """Fire one round of transitions; returns how many fired."""
        net = self._net
        num_fired = 0
        for transition, substitution in self.firing_transitions():
            # Trace output; fixed a stray ')' in the original format string
            # ("FIRE: {!r})({!r})").
            print("FIRE: {!r}({!r})".format(transition, substitution))
            transition.fire(substitution)
            print("-->", net.get_marking())
            num_fired += 1
        if num_fired:
            self._timestep += 1
        return num_fired

    @abc.abstractmethod
    def firing_transitions(self):
        """Yield the (transition, substitution) pairs to fire this step."""
        raise NotImplementedError()

    def enabled_transitions(self):
        """Yield (transition, substitutions) for each transition that can fire."""
        for transition in self._net.transitions():
            substitutions = transition.substitutions()
            if substitutions:
                yield transition, substitutions

    @abc.abstractmethod
    def notify_net_changed(self):
        raise NotImplementedError()

    @abc.abstractmethod
    def notify_transition_fired(self, transition):
        raise NotImplementedError()
class SimpleEngine(Engine):
    """Engine that fires the first available substitution of every enabled transition."""
    def firing_transitions(self):
        for transition, substitutions in self.enabled_transitions():
            first = next(iter(substitutions))
            yield transition, first
    def notify_transition_fired(self, transition):
        pass
    def notify_net_changed(self):
        pass
class RandomEngine(Engine):
    """Engine that fires one random substitution of one random enabled transition per step."""
    def firing_transitions(self):
        # enabled_transitions is a generator *method* and must be called;
        # the previous list(self.enabled_transitions) passed the bound method
        # object itself, raising TypeError at runtime.
        lst = list(self.enabled_transitions())
        # NOTE(review): raises IndexError when no transition is enabled --
        # confirm whether an empty yield is intended instead.
        transition, substitutions = random.choice(lst)
        substitutions = list(substitutions)
        substitution = random.choice(substitutions)
        yield transition, substitution
    def notify_transition_fired(self, transition):
        pass
    def notify_net_changed(self):
        pass
| {
"repo_name": "simone-campagna/petra",
"path": "petra/engine.py",
"copies": "1",
"size": "2140",
"license": "apache-2.0",
"hash": 2211631142849900000,
"line_mean": 23.3181818182,
"line_max": 71,
"alpha_frac": 0.6060747664,
"autogenerated": false,
"ratio": 4.467640918580376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5573715684980376,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'EventApi',
]
class EventApi(object):
def stream(self, start, end, priority=None, sources=None, tags=None):
"""
Get the events that occurred between the *start* and *end* POSIX timestamps,
optional filtered by *priority* ("low" or "normal"), *sources* and
*tags*.
See the `event API documentation <http://api.datadoghq.com/events>`_ for the
event data format.
>>> dog_http_api.stream(1313769783, 131378000, sources=["nagios"])
{ "events": [
{
"id": "event-1",
"title": "my first event",
"priority": "normal",
"handle": "alq@datadoghq.com",
"date_happened": 1313769783,
"source": "nagios",
"alert_type": "ok",
"is_aggregate": true,
"children": [
{
"id": "event-100",
"date_happened": 123459833,
"alert_type": "error"
}, ...
]
}, ...
]
}
"""
params = {
'start': int(start),
'end': int(end),
}
if priority:
params['priority'] = priority
if sources:
params['sources'] = ','.join(sources)
if tags:
params['tags'] = ','.join(tags)
return self.http_request('GET', '/events',
response_formatter=lambda x: x['events'],
**params
)
def get_event(self, id):
"""
Get details for an individual event with the given *id*.
See the `event API documentation <http://api.datadoghq.com/events>`_ for the
event data format.
>>> dog_http_api.get_event("event-1")
{
"id": "event-1",
"title": "my first event",
"priority": "normal",
"handle": "alq@datadoghq.com",
"date_happened": 1313769783,
"source": "nagios",
"alert_type": "ok",
"is_aggregate": true,
"children": [
{
"id": "event-100",
"date_happened": 123459833,
"alert_type": "error"
}, ...
]
}
"""
return self.http_request('GET', '/events/' + str(id),
response_formatter=lambda x: x['event'],
)
def _event(self, title, text, date_happened=None, handle=None, priority=None, related_event_id=None, tags=None, host=None, device_name=None, aggregation_key=None, source_type_name=None,**kwargs):
"""
Post an event.
:param title: title for the new event
:type title: string
:param text: event message
:type text: string
:param date_happened: when the event occurred. if unset defaults to the current time. (POSIX timestamp)
:type date_happened: integer
:param handle: user to post the event as. defaults to owner of the application key used to submit.
:type handle: string
:param priority: priority to post the event as. ("normal" or "low", defaults to "normal")
:type priority: string
:param related_event_id: post event as a child of the given event
:type related_event_id: id
:param tags: tags to post the event with
:type tags: list of strings
:param host: host to post the event with
:type host: list of strings
:param device_name: device_name to post the event with
:type device_name: list of strings
:param aggregation_key: key to aggregate this event on
:type aggregation_key: string
:param source_type_name: type of event to post the event with
:type source_type_name: string
:return: new event id
:rtype: integer
"""
body = {
'title': title,
'text': text,
}
if date_happened is not None:
body['date_happened'] = date_happened
if handle is not None:
body['handle'] = handle
if priority is not None:
body['priority'] = priority
if related_event_id is not None:
body['related_event_id'] = related_event_id
if tags is not None:
body['tags'] = ','.join(tags)
if host is not None:
body['host'] = host
if device_name is not None:
body['device_name'] = device_name
if aggregation_key is not None:
body['aggregation_key'] = aggregation_key
if source_type_name is not None:
body['source_type_name'] = source_type_name
body.update(kwargs)
return self.http_request('POST', '/events', body,
response_formatter=lambda x: x['event']['id'],
)
    def event(self, *args, **kwargs):
        """
        Post an event.
        All arguments are forwarded unchanged to :meth:`_event`.
        :param title: title for the new event
        :type title: string
        :param text: event message
        :type text: string
        :param date_happened: when the event occurred. if unset defaults to the current time. (POSIX timestamp)
        :type date_happened: integer
        :param handle: user to post the event as. defaults to owner of the application key used to submit.
        :type handle: string
        :param priority: priority to post the event as. ("normal" or "low", defaults to "normal")
        :type priority: string
        :param related_event_id: post event as a child of the given event
        :type related_event_id: id
        :param tags: tags to post the event with
        :type tags: list of strings
        :param host: host to post the event with
        :type host: list of strings
        :param device_name: device_name to post the event with
        :type device_name: list of strings
        :param aggregation_key: key to aggregate this event on
        :type aggregation_key: string
        :param source_type_name: type of event to post the event with
        :type source_type_name: string
        :return: new event id
        :rtype: integer
        """
        return self._event(*args, **kwargs)
    def event_with_response(self, *args, **kwargs):
        """
        Post an event; arguments are forwarded unchanged to :meth:`_event`.
        NOTE(review): currently identical to :meth:`event`; the name suggests
        it may once have returned the raw response -- TODO confirm intended
        distinction before relying on it.
        """
        return self._event(*args, **kwargs)
def comment(self, handle, message, comment_id=None, related_event_id=None):
"""
Post a comment *message* as the user with *handle*. Edit a comment by including it's *comment_id*.
Reply to a related event by setting the *related_event_id*.
>>> dog_http_api.comment("matt", "Hey! Something strange is going on.")
"""
body = {
'handle': handle,
'message': message,
}
if related_event_id is not None:
body['related_event_id'] = int(related_event_id)
return self.http_request('POST', '/comments', body,
response_formatter=lambda x: x['comment']['id'],
)
def update_comment(self, handle, message, comment_id):
body = {
'handle': handle,
'message': message,
}
return self.http_request('PUT', '/comments/%s' % comment_id, body,
response_formatter=lambda x: x['comment']['id'],
)
def delete_comment(self, comment_id):
"""
Delete a comment with the given *comment_id*.
>>> dog_http_api.delete_comment('1234')
"""
return self.http_request('DELETE', '/comments/' + str(comment_id))
| {
"repo_name": "DataDog/dogapi",
"path": "src/dogapi/http/events.py",
"copies": "1",
"size": "7284",
"license": "bsd-3-clause",
"hash": -23574762257918000,
"line_mean": 30.3965517241,
"line_max": 199,
"alpha_frac": 0.5399505766,
"autogenerated": false,
"ratio": 4.124575311438279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164525888038278,
"avg_score": null,
"num_lines": null
} |
# Public API of garage.asyncs.
__all__ = [
    'Event',
    'TaskCancelled',
    'TaskSet',
    'TaskStack',
    'cancelling',
    'select',
    'spawn',
    # HACK! Reaches into curio kernel internals; see its docstring.
    'close_socket_and_wakeup_task',
]
from collections import OrderedDict, deque
from functools import partial
import inspect
import curio
import curio.io
import curio.traps
from garage.assertions import ASSERT
from . import queues
from .base import Event # Create an alias to base.Event
class TaskCancelled(BaseException):
    """Cancellation exception raised inside coroutines spawned via
    asyncs.spawn().
    Derived from BaseException (not Exception) so that generic
    ``except Exception`` blocks do not accidentally swallow a
    cancellation; see spawn() for the full rationale.
    """
    pass
async def spawn(coro, **kwargs):
    """Spawn a task via curio.spawn(), then patch the task object so that
    cancelling it raises asyncs.TaskCancelled inside the coroutine rather
    than curio.TaskCancelled.
    Because asyncs.TaskCancelled derives from BaseException, an ordinary
    catch-all ``except Exception`` block will not swallow it, and code --
    including third-party libraries that have never heard of curio --
    does not need to explicitly re-raise the cancellation.
    With curio.TaskCancelled you must re-raise by hand:
        try:
            ...
        except curio.TaskCancelled:
            # Need to explicitly re-throw due to catch-all below.
            raise
        except Exception:
            ...
    With asyncs.TaskCancelled the catch-all is safe as-is:
        try:
            ...
        except Exception:
            # No need to explicitly re-throw.
            ...
    """
    child = await curio.spawn(coro, **kwargs)
    # Wrap the kernel-facing entry points; _send/_throw translate between
    # the two cancellation exception types.
    child._send = partial(_send, child._send)
    child._throw = partial(_throw, child._throw)
    return child
def _send(send, arg):
try:
return send(arg)
except TaskCancelled as e:
raise curio.TaskCancelled from e
def _throw(throw, type_, *args):
    # I assume curio doesn't pass any other args
    ASSERT(not args, 'not expect args for throw(): %r', args)
    is_cancel = (
        type_ is curio.TaskCancelled or type(type_) is curio.TaskCancelled)
    if not is_cancel:
        return throw(type_, *args)
    # Translate the exception: the task's coroutine observes
    # asyncs.TaskCancelled while the curio main loop still observes
    # curio.TaskCancelled.
    try:
        return throw(TaskCancelled)
    except TaskCancelled as exc:
        raise type_ from exc
class cancelling:
    """Async context manager that owns a task and cancels it on exit.
    Typical usage::
        async with await cancelling.spawn(coro) as task:
            ...
        # task has been cancelled here
    """
    @classmethod
    async def spawn(cls, coro, *, spawn=spawn, **kwargs):
        # Convenience constructor: spawn `coro` (with asyncs.spawn by
        # default) and wrap the resulting task.
        return cls(await spawn(coro, **kwargs))
    def __init__(self, task):
        self.task = task
    async def __aenter__(self):
        return self.task
    async def __aexit__(self, *_):
        # Cancel unconditionally, even when the body exits normally.
        await self.task.cancel()
async def select(cases, *, spawn=spawn):
    """Wait on several coroutines/tasks and return the first to finish.
    `cases` is either a mapping-like object (anything with a keys()
    method) whose keys are coroutines or tasks, or a plain iterable of
    coroutines or tasks.  With a mapping the return value is the pair
    ``(done_task, cases[key])``; otherwise just the done task.
    Unlike curio.TaskGroup, select() accepts bare coroutines: a task is
    spawned for each so they run in parallel, and every
    internally-spawned task is cancelled on the way out.
    """
    async with TaskStack(spawn=spawn) as stack:
        is_mapping = hasattr(cases, 'keys')
        table = {}
        for entry in cases:
            if inspect.iscoroutine(entry):
                spawned = await stack.spawn(entry)
            else:
                spawned = entry
            table[spawned] = is_mapping and cases[entry]
        # XXX A Task object cannot belong to more than one TaskGroup; if
        # any task in `cases` was already spawned from a TaskGroup,
        # curio.TaskGroup() will raise an AssertionError here.
        winner = await curio.TaskGroup(table).next_done()
        if is_mapping:
            return winner, table[winner]
        return winner
class TaskSet:
    """Similar to curio.TaskGroup, but use asyncs.spawn by default."""
    #
    # State transition:
    # --> __init__() --> OPERATING
    # OPERATING --> graceful_exit() --> CLOSING
    # OPERATING --> __aexit__() --> CLOSED
    # CLOSING --> __aexit__() --> CLOSED
    #
    # OPERATING == not self._graceful_exit and self._pending_tasks is not None
    # CLOSING == self._graceful_exit and self._pending_tasks is not None
    # CLOSED == self._graceful_exit and not self._pending_tasks
    #
    class TaskGroupAdapter:
        # Minimal object satisfying the interface curio.Task expects of its
        # `_taskgroup` attribute; forwards completion to the owning TaskSet.
        def __init__(self, task_set):
            self.__task_set = task_set
        # Callback from curio.Task
        async def _task_done(self, task):
            self.__task_set._on_task_done(task)
        # Callback from curio.Task
        def _task_discard(self, task):
            pass # Nothing here
    def __init__(self, *, spawn=spawn):
        self._pending_tasks = OrderedDict() # For implementing OrderedSet
        self._done_tasks = queues.Queue()
        self._graceful_exit = False
        self._spawn = spawn
    def ignore_done_tasks(self):
        """Do not track done tasks."""
        self._done_tasks.close()
    def graceful_exit(self):
        """Stop accepting new tasks (OPERATING -> CLOSING)."""
        self._graceful_exit = True
        # We may close the _done_tasks queue when it's CLOSED state
        if not self._pending_tasks:
            self._done_tasks.close()
    async def __aenter__(self):
        return self
    async def __aexit__(self, *_):
        # Cancel every still-pending task, most recently spawned first.
        self.graceful_exit()
        tasks, self._pending_tasks = self._pending_tasks, None
        for task in reversed(tasks.keys()):
            await task.cancel()
    async def spawn(self, coro, **kwargs):
        """Spawn a task and track it until done; only valid while OPERATING."""
        ASSERT(not self._graceful_exit, '%s is closing', self)
        task = await self._spawn(coro, **kwargs)
        ASSERT.false(task._taskgroup)
        ASSERT.false(task._ignore_result)
        self._pending_tasks[task] = None # Dummy value
        task._taskgroup = self.TaskGroupAdapter(self)
        return task
    def _on_task_done(self, task):
        # Invoked (via TaskGroupAdapter) whenever a tracked task finishes.
        if self._pending_tasks:
            self._pending_tasks.pop(task)
        # When we are aborting (bypassing graceful_exit()), there could
        # be tasks being done after we closed the _done_tasks queue (for
        # this to happen, we only need two tasks being done after
        # __aexit__ returns, and then the first task's _on_task_done closes
        # the _done_tasks queue (because _pending_tasks is None) and the
        # second task's _on_task_done sees a closed _done_tasks queue)
        if not self._done_tasks.is_closed():
            # Call put_nowait() so that we won't be blocked by put()
            # (is being blocked here a problem?)
            self._done_tasks.put_nowait(task)
        # We may close the _done_tasks queue when it's CLOSED state
        if self._graceful_exit and not self._pending_tasks:
            self._done_tasks.close()
    async def next_done(self):
        """Return the next completed task, or None once the set is CLOSED."""
        try:
            return await self._done_tasks.get()
        except queues.Closed:
            return None
    def __aiter__(self):
        return self
    async def __anext__(self):
        # Async-iterate over completed tasks until the set is CLOSED.
        next = await self.next_done()
        if next is None:
            raise StopAsyncIteration
        return next
class TaskStack:
    """ExitStack-like helper specialized for tasks: every task spawned
    through the stack is cancelled, in reverse (LIFO) order, when the
    stack exits.
    Use it to propagate cancellation from a parent task to its children.
    By default tasks are spawned with asyncs.spawn rather than
    curio.spawn.
    Note: curio.TaskGroup does not cancel tasks in reverse order, but
    TaskStack does.
    """
    def __init__(self, *, spawn=spawn):
        self._tasks = None
        self._callbacks = None
        self._spawn = spawn
    async def __aenter__(self):
        ASSERT.none(self._callbacks)
        self._tasks = deque()
        self._callbacks = deque()
        return self
    async def __aexit__(self, *_):
        ASSERT.not_none(self._callbacks)
        self._tasks = None
        pending, self._callbacks = self._callbacks, None
        # Run the registered callbacks LIFO, like ExitStack does.
        for is_async, func, args, kwargs in reversed(pending):
            if is_async:
                await func(*args, **kwargs)
            else:
                func(*args, **kwargs)
    def __iter__(self):
        ASSERT.not_none(self._tasks)
        for task in self._tasks:
            yield task
    async def wait_any(self):
        """Block until any task on the stack completes and return it."""
        ASSERT.not_none(self._tasks)
        return await curio.TaskGroup(self._tasks).next_done()
    async def spawn(self, coro, **kwargs):
        """Spawn a new task, push it onto the stack, and schedule its
        cancellation for stack exit."""
        ASSERT.not_none(self._callbacks)
        child = await self._spawn(coro, **kwargs)
        self._tasks.append(child)
        self.callback(child.cancel)
        return child
    def callback(self, func, *args, **kwargs):
        """Register an asynchronous (awaited) exit callback."""
        ASSERT.not_none(self._callbacks)
        self._callbacks.append((True, func, args, kwargs))
    def sync_callback(self, func, *args, **kwargs):
        """Register a synchronous exit callback."""
        ASSERT.not_none(self._callbacks)
        self._callbacks.append((False, func, args, kwargs))
async def close_socket_and_wakeup_task(socket):
    """Close a socket and wake up any task blocked on it.
    This is a hack to workaround the issue that when a file descriptor
    is closed, any task blocked on it will not be waked up, and thus
    will be blocked forever.
    NOTE: There will be at most one task blocked on a file descriptor.
    NOTE(review): relies on private curio kernel fields (`_ready`,
    `_selector`, `next_value`, `next_exc`, `state`, `cancel_func`) --
    re-verify against the pinned curio version when upgrading.
    """
    ASSERT.type_of(socket, curio.io.Socket)
    if not socket._socket:
        return # It's closed already.
    kernel = await curio.traps._get_kernel()
    # Manually move a blocked task back onto the kernel's ready queue,
    # clearing its wait state so the kernel will resume it.
    def mark_ready(task):
        kernel._ready.append(task)
        task.next_value = None
        task.next_exc = None
        task.state = 'READY'
        task.cancel_func = None
    # Wake up tasks and unregister the file from event loop.
    fileno = socket._fileno
    try:
        key = kernel._selector.get_key(fileno)
        rtask, wtask = key.data
        if rtask:
            mark_ready(rtask)
        if wtask:
            mark_ready(wtask)
        kernel._selector.unregister(fileno)
    except KeyError:
        # Not registered with the selector: no task is blocked on it.
        pass
    await socket.close()
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/asyncs/__init__.py",
"copies": "1",
"size": "10210",
"license": "mit",
"hash": 6846861701288514000,
"line_mean": 30.4153846154,
"line_max": 78,
"alpha_frac": 0.6024485798,
"autogenerated": false,
"ratio": 3.9573643410852712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.505981292088527,
"avg_score": null,
"num_lines": null
} |
# Public names of this core module; extend_all() below appends the public
# names of `umath` and `numerictypes` to this list at import time.
__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
           'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
           'dtype', 'fromstring', 'fromfile', 'frombuffer',
           'int_asbuffer', 'where', 'argwhere',
           'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops',
           'can_cast', 'promote_types', 'min_scalar_type', 'result_type',
           'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
           'isfortran', 'empty_like', 'zeros_like',
           'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot',
           'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
           'array2string', 'get_printoptions', 'set_printoptions',
           'array_repr', 'array_str', 'set_string_function',
           'little_endian', 'require',
           'fromiter', 'array_equal', 'array_equiv',
           'indices', 'fromfunction',
           'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
           'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
           'seterr', 'geterr', 'setbufsize', 'getbufsize',
           'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
           'Inf', 'inf', 'infty', 'Infinity',
           'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
           'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
           'ComplexWarning']
import sys
import warnings
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
# The old buffer-protocol helpers are only exported on Python 2; the
# corresponding multiarray attributes are bound under the same guard below.
if sys.version_info[0] < 3:
    __all__.extend(['getbuffer', 'newbuffer'])
# Exported via __all__ above.
class ComplexWarning(RuntimeWarning):
    """
    The warning raised when casting a complex dtype to a real dtype.
    As implemented, casting a complex number to a real discards its imaginary
    part, but this behavior may not be what the user actually wants.
    """
    pass
# `~` on integer/boolean arrays is implemented by the `invert` ufunc.
bitwise_not = invert
# Re-export the clipping-mode codes and size limits from multiarray.
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
# Core types implemented in the multiarray C extension.
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
nditer = multiarray.nditer
nested_iters = multiarray.nested_iters
broadcast = multiarray.broadcast
dtype = multiarray.dtype
# All ufuncs share a single type; `sin` (brought in by `from umath import *`)
# serves as a representative instance.
ufunc = type(sin)
# originally from Fernando Perez's IPython
def zeros_like(a, dtype=None, order='K', subok=True):
    """
    Return an array of zeros with the same shape and type as a given array.
    Implemented as ``empty_like`` followed by an in-place fill with zero.
    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible.
    subok : bool, optional
        Passed through to `empty_like`.
    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.
    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.
    Examples
    --------
    >>> np.zeros_like(np.arange(6).reshape((2, 3)))
    array([[0, 0, 0],
           [0, 0, 0]])
    """
    out = empty_like(a, dtype=dtype, order=order, subok=subok)
    out.fill(0)
    return out
# end Fernando's utilities
def extend_all(module):
    """Append to __all__ every public name exported by `module` that is
    not already listed.
    Uses the module's own __all__ when it defines one, otherwise falls
    back to every attribute not starting with an underscore.
    """
    # A set gives O(1) membership tests; the original built a dict with
    # dummy values (the pre-`set` idiom).
    existing = set(__all__)
    try:
        mall = getattr(module, '__all__')
    except AttributeError:
        mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
    for a in mall:
        if a not in existing:
            __all__.append(a)
# Merge the public names of the C extension wrapper modules into __all__.
extend_all(umath)
extend_all(numerictypes)
newaxis = None
# Thin aliases for the constructors and utilities implemented in the
# multiarray C extension module.
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
count_nonzero = multiarray.count_nonzero
empty = multiarray.empty
empty_like = multiarray.empty_like
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
# The old buffer-protocol helpers only exist on Python 2 builds
# (matching the __all__ guard near the top of the module).
if sys.version_info[0] < 3:
    newbuffer = multiarray.newbuffer
    getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
promote_types = multiarray.promote_types
min_scalar_type = multiarray.min_scalar_type
result_type = multiarray.result_type
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
einsum = multiarray.einsum
def asarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray.
    No copy is performed when the input is already an ndarray with a
    matching dtype and order.  ndarray subclasses are converted to
    base-class ndarrays (contrast with `asanyarray`, which passes
    subclasses through).
    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array
        (lists, tuples, nested sequences, ndarrays).
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('F' for FORTRAN)
        memory representation. Defaults to 'C'.
    Returns
    -------
    out : ndarray
        Array interpretation of `a`.
    See Also
    --------
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    fromiter : Create an array from an iterator.
    Examples
    --------
    >>> np.asarray([1, 2])
    array([1, 2])
    >>> a = np.array([1, 2])
    >>> np.asarray(a) is a
    True
    """
    converted = array(a, dtype, copy=False, order=order)
    return converted
def asanyarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray, but pass ndarray subclasses through.
    If `a` is already an ndarray (or a subclass) with matching dtype and
    order it is returned as-is and no copy is performed.
    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array
        (scalars, lists, tuples, nested sequences, ndarrays).
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('F') memory
        representation. Defaults to 'C'.
    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`.
    See Also
    --------
    asarray : Similar function which always returns ndarrays.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    Examples
    --------
    >>> np.asanyarray([1, 2])
    array([1, 2])
    >>> a = np.matrix([1, 2])
    >>> np.asanyarray(a) is a
    True
    """
    result = array(a, dtype, copy=False, order=order, subok=True)
    return result
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array in memory (C order).
    The result is always at least 1-D; no copy is made when `a` already
    satisfies the layout and dtype.
    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.
    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type `dtype`
        if specified.
    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.
    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.ascontiguousarray(x).flags['C_CONTIGUOUS']
    True
    """
    result = array(a, dtype, copy=False, order='C', ndmin=1)
    return result
def asfortranarray(a, dtype=None):
    """
    Return an array laid out in Fortran (column-major) order in memory.
    The result is always at least 1-D; no copy is made when `a` already
    satisfies the layout and dtype.
    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.
    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.
    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    asanyarray : Convert input to an ndarray with either row or
                 column-major memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.
    Examples
    --------
    >>> y = np.asfortranarray(np.arange(6).reshape(2,3))
    >>> y.flags['F_CONTIGUOUS']
    True
    """
    result = array(a, dtype, copy=False, order='F', ndmin=1)
    return result
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.
    This function is useful to be sure that an array with the correct flags
    is returned for passing to compiled code (perhaps through ctypes).
    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying array.
    dtype : data-type
        The required data-type, the default data-type is float64).
    requirements : str or list of str
        The requirements list can be any of the following
        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A') - ensure a data-type aligned array
        * 'WRITEABLE' ('W') - ensure a writable array
        * 'OWNDATA' ('O') - ensure an array that owns its own data
        * 'ENSUREARRAY' ('E') - ensure a base-class ndarray
    See Also
    --------
    asarray : Convert input to an ndarray.
    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    ndarray.flags : Information about the memory layout of the array.
    Notes
    -----
    The returned array will be guaranteed to have the listed requirements
    by making a copy if needed.
    """
    # Normalize each requirement to its canonical one-letter flag.  This
    # also fixes a bug in the previous implementation: 'E'/'ENSUREARRAY'
    # was left in the requirements and then crashed the arr.flags[prop]
    # lookup below with a KeyError, since 'E' is not an ndarray flag.
    possible_flags = {
        'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
        'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
        'A': 'A', 'ALIGNED': 'A',
        'W': 'W', 'WRITEABLE': 'W',
        'O': 'O', 'OWNDATA': 'O',
        'E': 'E', 'ENSUREARRAY': 'E',
    }
    if requirements is None:
        requirements = []
    requirements = set(possible_flags[x.upper()] for x in requirements)
    if not requirements:
        return asanyarray(a, dtype=dtype)
    # 'E' only controls subclass passthrough; drop it before flag checks.
    if 'E' in requirements:
        requirements.remove('E')
        subok = False
    else:
        subok = True
    arr = array(a, dtype=dtype, copy=False, subok=subok)
    # Choose the memory-layout order a potential copy should use.
    copychar = 'A'
    if 'F' in requirements:
        copychar = 'F'
    elif 'C' in requirements:
        copychar = 'C'
    # A single copy in `copychar` order satisfies every requested flag.
    for prop in requirements:
        if not arr.flags[prop]:
            arr = arr.copy(copychar)
            break
    return arr
def isfortran(a):
    """
    Returns True if array is arranged in Fortran-order in memory
    and dimension > 1.
    Parameters
    ----------
    a : ndarray
        Input array.
    Examples
    --------
    >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
    >>> np.isfortran(b)
    True
    >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    >>> np.isfortran(a)
    False
    The transpose of a C-ordered array is FORTRAN-ordered:
    >>> np.isfortran(a.T)
    True
    1-D arrays always evaluate as False:
    >>> np.isfortran(np.array([1, 2], order='FORTRAN'))
    False
    """
    # 'fnc' == Fortran-contiguous and Not C-contiguous.  A 1-D contiguous
    # array is both F- and C-contiguous, so it reports False.
    flags = a.flags
    return flags.fnc
def argwhere(a):
    """
    Find the indices of array elements that are non-zero, grouped by element.
    Parameters
    ----------
    a : array_like
        Input data.
    Returns
    -------
    index_array : ndarray
        Indices of elements that are non-zero, one row per element.
    See Also
    --------
    where, nonzero
    Notes
    -----
    ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
    The output of ``argwhere`` is not suitable for indexing arrays.
    For this purpose use ``where(a)`` instead.
    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.argwhere(x>1)
    array([[0, 2],
           [1, 0],
           [1, 1],
           [1, 2]])
    """
    # nonzero() yields one index array per axis; transposing groups the
    # per-axis indices into one row per non-zero element.
    nonzero_indices = asanyarray(a).nonzero()
    return transpose(nonzero_indices)
def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of a.
    This is equivalent to ``a.ravel().nonzero()[0]``.
    Parameters
    ----------
    a : ndarray
        Input array.
    Returns
    -------
    res : ndarray
        Output array, containing the indices of the elements of `a.ravel()`
        that are non-zero.
    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.
    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])
    """
    flat = a.ravel()
    # nonzero() on a 1-D array returns a one-element tuple of indices.
    return flat.nonzero()[0]
# Map the first letter of a mode name ('valid', 'same', 'full') to the
# integer mode code that multiarray.correlate expects.
_mode_from_name_dict = {'v': 0,
                        's' : 1,
                        'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a, v, mode='valid', old_behavior=False):
    """
    Cross-correlation of two 1-dimensional sequences.
    This function computes the correlation as generally defined in signal
    processing texts::
        z[k] = sum_n a[n] * conj(v[n+k])
    with a and v sequences being zero-padded where necessary and conj being
    the conjugate.
    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `convolve` docstring.  Note that the default
        is `valid`, unlike `convolve`, which uses `full`.
    old_behavior : bool
        If True, uses the old behavior from Numeric, (correlate(a,v) == correlate(v,
        a), and the conjugate is not taken for complex arrays). If False, uses
        the conventional signal processing definition (see note).
    See Also
    --------
    convolve : Discrete, linear convolution of two one-dimensional sequences.
    Examples
    --------
    >>> np.correlate([1, 2, 3], [0, 1, 0.5])
    array([ 3.5])
    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
    array([ 2. ,  3.5,  3. ])
    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
    array([ 0.5,  2. ,  3.5,  3. ,  0. ])
    """
    mode = _mode_from_name(mode)
    # the old behavior should be made available under a different name, see thread
    # http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630
    if old_behavior:
        # Legacy Numeric semantics: symmetric arguments, no conjugation.
        warnings.warn("""
The old behavior of correlate was deprecated for 1.4.0, and will be completely removed
for NumPy 2.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
                      DeprecationWarning)
        return multiarray.correlate(a,v,mode)
    else:
        # correlate2 implements the conventional (conjugating) definition.
        return multiarray.correlate2(a,v,mode)
def convolve(a, v, mode='full'):
    """
    Returns the discrete, linear convolution of two one-dimensional
    sequences.
    The convolution operator is often seen in signal processing, where it
    models the effect of a linear time-invariant system on a signal [1]_. In
    probability theory, the sum of two independent random variables is
    distributed according to the convolution of their individual
    distributions.
    Parameters
    ----------
    a : (N,) array_like
        First one-dimensional input array.
    v : (M,) array_like
        Second one-dimensional input array.
    mode : {'full', 'valid', 'same'}, optional
        'full' (default): return the convolution at each point of overlap,
        with an output shape of (N+M-1,); boundary effects may be seen at
        the end-points.
        'same': return output of length ``max(M, N)``; boundary effects
        are still visible.
        'valid': return output of length ``max(M, N) - min(M, N) + 1``;
        the product is only given where the signals overlap completely.
    Returns
    -------
    out : ndarray
        Discrete, linear convolution of `a` and `v`.
    See Also
    --------
    scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
                               Transform.
    scipy.linalg.toeplitz : Used to construct the convolution operator.
    Notes
    -----
    The discrete convolution operation is defined as
    .. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m]
    References
    ----------
    .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
    Examples
    --------
    >>> np.convolve([1, 2, 3], [0, 1, 0.5])
    array([ 0. ,  1. ,  2.5,  4. ,  1.5])
    >>> np.convolve([1,2,3],[0,1,0.5], 'same')
    array([ 1. ,  2.5,  4. ])
    >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
    array([ 2.5])
    """
    longer = array(a, ndmin=1)
    shorter = array(v, ndmin=1)
    # Keep the longer sequence first; correlate expects it that way.
    if len(shorter) > len(longer):
        longer, shorter = shorter, longer
    if len(longer) == 0:
        raise ValueError('a cannot be empty')
    if len(shorter) == 0:
        raise ValueError('v cannot be empty')
    mode = _mode_from_name(mode)
    # Convolution == correlation with the second sequence reversed.
    return multiarray.correlate(longer, shorter[::-1], mode)
def outer(a, b):
    """
    Compute the outer product of two vectors.
    Given two vectors, ``a = [a0, a1, ..., aM]`` and
    ``b = [b0, b1, ..., bN]``, the outer product [1]_ is::
        [[a0*b0  a0*b1 ... a0*bN ]
         [a1*b0    .
         [ ...          .
         [aM*b0            aM*bN ]]
    Parameters
    ----------
    a, b : array_like, shape (M,), (N,)
        First and second input vectors.  Inputs are flattened if they
        are not already 1-dimensional.
    Returns
    -------
    out : ndarray, shape (M, N)
        ``out[i, j] = a[i] * b[j]``
    See also
    --------
    inner, einsum
    References
    ----------
    .. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
             ed., Baltimore, MD, Johns Hopkins University Press, 1996,
             pg. 8.
    Examples
    --------
    >>> np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
    array([[-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.]])
    """
    # Flatten both inputs, then broadcast a column against a row.
    av = asarray(a).ravel()
    bv = asarray(b).ravel()
    return av[:, newaxis] * bv[newaxis, :]
# try to import blas optimized dot if available
try:
    # importing this changes the dot function for basic 4 types
    # to blas-optimized versions.
    from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
    # _dotblas was not built; fall back to the plain multiarray
    # implementations.
    # docstrings are in add_newdocs.py
    inner = multiarray.inner
    dot = multiarray.dot
    def vdot(a, b):
        # vdot flattens both inputs and conjugates the first.
        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
    def alterdot():
        # No-op: without _dotblas there is no optimized dot to switch to.
        pass
    def restoredot():
        # No-op counterpart of alterdot().
        pass
def tensordot(a, b, axes=2):
    """
    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors ``a`` and ``b``, and an array_like object containing
    two array_like objects ``(a_axes, b_axes)``, sum the products of
    ``a``'s and ``b``'s elements (components) over the axes specified by
    ``a_axes`` and ``b_axes``.  The third argument can be a single
    non-negative integer_like scalar ``N``; if it is such, then the last
    ``N`` dimensions of ``a`` and the first ``N`` dimensions of ``b`` are
    summed over.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : variable type
        * integer_like scalar
          Number of axes to sum over (applies to both arrays); or
        * array_like, shape = (2,), both elements array_like
          Axes to be summed over, first sequence applying to ``a``,
          second to ``b``.

    Returns
    -------
    out : ndarray
        The tensor dot product of the input, reshaped to the non-summed
        dimensions of ``a`` followed by those of ``b``.

    Raises
    ------
    ValueError
        If the summed axes of ``a`` and ``b`` have mismatched lengths.

    See Also
    --------
    dot, einsum

    Notes
    -----
    When there is more than one axis to sum over - and they are not the
    last (first) axes of ``a`` (``b``) - the argument ``axes`` should
    consist of two sequences of the same length, with the first axis to
    sum over given first in both sequences, the second axis second, and
    so forth.

    Examples
    --------
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    """
    try:
        iter(axes)
    except TypeError:
        # Scalar `axes`: contract the last `axes` dims of `a` with the
        # first `axes` dims of `b`.  (Was a bare `except:`; iter() raises
        # TypeError on non-iterables.)
        axes_a = range(-axes, 0)
        axes_b = range(0, axes)
    else:
        axes_a, axes_b = axes
    # Normalize each axis spec to a list, accepting bare integers.
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1

    a, b = asarray(a), asarray(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)

    # Check the contracted dimensions match pairwise, converting negative
    # axis indices to non-negative ones along the way.
    equal = True
    if na != nb:
        equal = False
    else:
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")

    # Move the axes to sum over to the end of "a"
    # and to the front of "b", then collapse each operand to 2-D so the
    # contraction becomes a single matrix product.
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]

    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]

    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    # Restore the free (non-contracted) dimensions of both operands.
    return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements shifted beyond the last position wrap around to the first.

    Parameters
    ----------
    a : array_like
        Input array.
    shift : int
        The number of places by which elements are shifted.
    axis : int, optional
        The axis along which elements are shifted.  By default, the array
        is flattened before shifting, after which the original shape is
        restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    See Also
    --------
    rollaxis : Roll the specified axis backwards, until it lies in a
               given position.

    Examples
    --------
    >>> x = np.arange(10)
    >>> np.roll(x, 2)
    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
    """
    arr = asanyarray(a)
    flattened = axis is None
    length = arr.size if flattened else arr.shape[axis]
    shift %= length
    # Build the index order that realizes the roll: the trailing `shift`
    # positions come first, followed by the leading ones.
    order = concatenate((arange(length - shift, length),
                         arange(length - shift)))
    rolled = arr.take(order, axis)
    return rolled.reshape(arr.shape) if flattened else rolled
def rollaxis(a, axis, start=0):
    """
    Roll the specified axis backwards, until it lies in a given position.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        The axis to roll backwards.  The positions of the other axes do
        not change relative to one another.
    start : int, optional
        The axis is rolled until it lies before this position.  The
        default, 0, results in a "complete" roll.

    Returns
    -------
    res : ndarray
        Output array (a view via transpose when a roll occurs).

    Raises
    ------
    ValueError
        If `axis` or `start` is out of range after normalization.

    See Also
    --------
    roll : Roll the elements of an array by a number of positions along a
           given axis.

    Examples
    --------
    >>> a = np.ones((3,4,5,6))
    >>> np.rollaxis(a, 3, 1).shape
    (3, 6, 4, 5)
    """
    n = a.ndim
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    # Py3-compatible raise calls (was `raise ValueError, msg % ...`).
    if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n + 1):
        raise ValueError(msg % ('start', start, n + 1))
    if axis < start:
        # Removing `axis` from its old slot shifts the target left by one.
        start -= 1
    if axis == start:
        return a
    # list() is required so .remove()/.insert() work (range objects are
    # immutable on Python 3).
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
    # Compatibility shim for scipy: equivalent to rolling `axis` all the
    # way back to position 0.
    return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """
    Return the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector
    perpendicular to both `a` and `b`.  If `a` and `b` are arrays of
    vectors, the vectors are defined by the last axis of `a` and `b` by
    default, and these axes can have dimensions 2 or 3.  Where the
    dimension of either `a` or `b` is 2, the third component of the input
    vector is assumed to be zero and the cross product calculated
    accordingly.  In cases where both input vectors have dimension 2, the
    z-component of the cross product is returned.

    Parameters
    ----------
    a : array_like
        Components of the first vector(s).
    b : array_like
        Components of the second vector(s).
    axisa : int, optional
        Axis of `a` that defines the vector(s).  By default, the last axis.
    axisb : int, optional
        Axis of `b` that defines the vector(s).  By default, the last axis.
    axisc : int, optional
        Axis of `c` containing the cross product vector(s).  By default,
        the last axis.
    axis : int, optional
        If defined, the axis of `a`, `b` and `c` that defines the
        vector(s) and cross product(s).  Overrides `axisa`, `axisb` and
        `axisc`.

    Returns
    -------
    c : ndarray
        Vector cross product(s).

    Raises
    ------
    ValueError
        When the dimension of the vector(s) in `a` and/or `b` does not
        equal 2 or 3.

    See Also
    --------
    inner : Inner product
    outer : Outer product.
    ix_ : Construct index arrays.

    Examples
    --------
    >>> x = [1, 2, 3]
    >>> y = [4, 5, 6]
    >>> np.cross(x, y)
    array([-3,  6, -3])
    """
    if axis is not None:
        axisa,axisb,axisc=(axis,)*3
    # Move the vector components to axis 0 so a[0], a[1], a[2] index them.
    a = asarray(a).swapaxes(axisa, 0)
    b = asarray(b).swapaxes(axisb, 0)
    msg = "incompatible dimensions for cross product\n"\
          "(dimension must be 2 or 3)"
    if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
        raise ValueError(msg)
    if a.shape[0] == 2:
        if (b.shape[0] == 2):
            # Both 2-D vectors: only the scalar z-component is returned.
            cp = a[0]*b[1] - a[1]*b[0]
            if cp.ndim == 0:
                return cp
            else:
                # Put the result components back on the requested axis.
                return cp.swapaxes(0, axisc)
        else:
            # a is 2-D (implicit a[2] == 0), b is 3-D.
            x = a[1]*b[2]
            y = -a[0]*b[2]
            z = a[0]*b[1] - a[1]*b[0]
    elif a.shape[0] == 3:
        if (b.shape[0] == 3):
            # Full 3-D x 3-D cross product.
            x = a[1]*b[2] - a[2]*b[1]
            y = a[2]*b[0] - a[0]*b[2]
            z = a[0]*b[1] - a[1]*b[0]
        else:
            # a is 3-D, b is 2-D (implicit b[2] == 0).
            x = -a[2]*b[1]
            y = a[2]*b[0]
            z = a[0]*b[1] - a[1]*b[0]
    cp = array([x,y,z])
    if cp.ndim == 1:
        return cp
    else:
        return cp.swapaxes(0,axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions

# Scalar types whose repr needs no explicit dtype annotation: for arrays
# of these types, array_repr() omits ", dtype=..." since the constructor
# infers the type from the data.
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span.
    precision : int, optional
        Floating point precision; defaults to the current printing
        precision (changeable via `set_printoptions`).
    suppress_small : bool, optional
        Represent very small numbers as zero; default is False.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    """
    if arr.size > 0 or arr.shape == (0,):
        body = array2string(arr, max_line_width, precision, suppress_small,
                            ', ', "array(")
    else:
        # Zero-size array with a shape other than (0,): render the shape
        # explicitly since the data alone cannot convey it.
        body = "[], shape=%s" % (repr(arr.shape),)

    is_typeless = arr.dtype.type in _typelessdata
    if arr.__class__ is not ndarray:
        cls_name = arr.__class__.__name__
    else:
        cls_name = "array"

    if is_typeless and arr.size:
        return cls_name + "(%s)" % body

    dtype_str = arr.dtype.name
    pad = ''
    if issubclass(arr.dtype.type, flexible):
        if arr.dtype.names:
            dtype_str = "%s" % str(arr.dtype)
        else:
            dtype_str = "'%s'" % str(arr.dtype)
        # Flexible dtypes can be long; continue on a new, aligned line.
        pad = '\n' + ' ' * len("array(")
    return cls_name + "(%s, %sdtype=%s)" % (body, pad, dtype_str)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data is returned as a single string.  Unlike `array_repr`, the
    result carries no information about the array class or its dtype.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`
        (indirectly defaults to 75).
    precision : int, optional
        Floating point precision; defaults to the current printing
        precision (changeable via `set_printoptions`).
    suppress_small : bool, optional
        Represent numbers very close to zero as zero; default is False.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    rendered = array2string(a, max_line_width, precision, suppress_small,
                            ' ', "", str)
    return rendered
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function used to pretty print arrays; it should accept a single
        array argument and return a string.  If None, the handler is
        reset to the default NumPy function.
    repr : bool, optional
        If True (default), sets the ``__repr__`` handler; if False, sets
        the ``__str__`` handler.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> np.set_string_function(lambda a: 'HA!')
    >>> np.arange(3)
    HA!
    >>> np.set_string_function(None)  # reset to default
    """
    if f is not None:
        return multiarray.set_string_function(f, repr)
    # f is None: restore the default printer for the chosen slot.
    default = array_repr if repr else array_str
    return multiarray.set_string_function(default, 1 if repr else 0)
# Install the default pretty-printers: array_str for str(), array_repr
# for repr().
set_string_function(array_str, 0)
set_string_function(array_repr, 1)

# True when the host CPU stores multi-byte values little-endian.
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
    """
    Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0, 1, ...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : dtype, optional
        Data type of the result.

    Returns
    -------
    grid : ndarray
        The array of grid indices, with
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``;
        ``grid[k]`` holds the index values along the k-th axis.

    See Also
    --------
    mgrid, meshgrid

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid[0]
    array([[0, 0, 0],
           [1, 1, 1]])
    >>> grid[1]
    array([[0, 1, 2],
           [0, 1, 2]])
    """
    dims = tuple(dimensions)
    ndim = len(dims)
    if ndim == 0:
        return array([], dtype=dtype)
    grid = empty((ndim,) + dims, dtype=dtype)
    for k, size in enumerate(dims):
        # A ramp 0..size-1 oriented along axis k, broadcast against a
        # zero array of the complementary shape to fill grid[k].
        ramp = arange(size, dtype=dtype)
        ramp.shape = (1,) * k + (size,) + (1,) * (ndim - k - 1)
        filler = zeros(dims[:k] + (1,) + dims[k + 1:], dtype)
        add(ramp, filler, grid[k])
    return grid
def fromfunction(function, shape, **kwargs):
    """
    Construct an array by executing a function over each coordinate.

    The resulting array has value ``function(x, y, z)`` at coordinate
    ``(x, y, z)``.

    Parameters
    ----------
    function : callable
        Called with N coordinate arrays, one per axis of `shape`; must be
        capable of operating on arrays.
    shape : (N,) tuple of ints
        Shape of the output array, which also determines the shape of the
        coordinate arrays passed to `function`.
    dtype : data-type, optional
        Data-type of the coordinate arrays passed to `function`; float by
        default.

    Returns
    -------
    out : any
        The result of the call to `function`, passed back directly.

    See Also
    --------
    indices, meshgrid

    Notes
    -----
    Keywords other than `shape` and `dtype` are passed to `function`.

    Examples
    --------
    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4]])
    """
    coord_dtype = kwargs.pop('dtype', float)
    coords = indices(shape, dtype=coord_dtype)
    # Any remaining keywords are forwarded to the user function.
    return function(*coords, **kwargs)
def isscalar(num):
    """
    Return True if the type of `num` is a scalar type.

    Parameters
    ----------
    num : any
        Input argument, of any type and shape.

    Returns
    -------
    val : bool
        True if `num` is a scalar type, False otherwise.

    Examples
    --------
    >>> np.isscalar(3.1)
    True
    >>> np.isscalar([3.1])
    False
    """
    # NumPy scalar types subclass `generic`; anything else is checked
    # against the table of recognized Python scalar types.
    return isinstance(num, generic) or type(num) in ScalarType
# Hex-digit -> 4-bit binary string lookup used by binary_repr() to expand
# the output of hex().  The 'L' entry maps the Python 2 long-literal
# suffix (emitted by hex() for longs) to the empty string so it is
# silently dropped.
_lkup = {
    '0':'0000',
    '1':'0001',
    '2':'0010',
    '3':'0011',
    '4':'0100',
    '5':'0101',
    '6':'0110',
    '7':'0111',
    '8':'1000',
    '9':'1001',
    'a':'1010',
    'b':'1011',
    'c':'1100',
    'd':'1101',
    'e':'1110',
    'f':'1111',
    'A':'1010',
    'B':'1011',
    'C':'1100',
    'D':'1101',
    'E':'1110',
    'F':'1111',
    'L':''}
def binary_repr(num, width=None):
    """
    Return the binary representation of the input number as a string.

    For negative numbers, if `width` is not given, a minus sign is added
    to the front.  If `width` is given, the two's complement of the
    number is returned, with respect to that width.

    Parameters
    ----------
    num : int
        Only an integer decimal number can be used.
    width : int, optional
        The length of the returned string if `num` is positive, the
        length of the two's complement if `num` is negative.

    Returns
    -------
    bin : str
        Binary representation of `num` or two's complement of `num`.

    See Also
    --------
    base_repr : Return a string representation of a number in the given
                base system.

    Examples
    --------
    >>> np.binary_repr(3)
    '11'
    >>> np.binary_repr(-3)
    '-11'
    >>> np.binary_repr(-3, width=4)
    '1101'
    """
    sign = ''
    if num < 0:
        if width is None:
            sign = '-'
            num = -num
        else:
            # Replace num with its two's complement w.r.t. `width`.
            num = 2 ** width + num
    elif num == 0:
        return '0' * (width or 1)
    # format(num, 'b') emits the binary digits directly; this replaces
    # the old hex()-plus-lookup-table translation and avoids shadowing
    # the builtin `bin`.
    bits = format(num, 'b')
    if width is not None:
        # zfill pads but never truncates, matching the old behavior for
        # widths smaller than the number of significant bits.
        bits = bits.zfill(width)
    return sign + bits
def base_repr(number, base=2, padding=0):
    """
    Return a string representation of a number in the given base system.

    Parameters
    ----------
    number : int
        The value to convert.  Negative values get a leading '-'.
    base : int, optional
        Convert `number` to the `base` number system.  The valid range is
        2-36; the default value is 2.
    padding : int, optional
        Number of zeros padded on the left.  Default is 0 (no padding).

    Returns
    -------
    out : str
        String representation of `number` in `base` system.

    Raises
    ------
    ValueError
        If `base` is outside the range 2-36.

    See Also
    --------
    binary_repr : Faster version of `base_repr` for base 2.

    Examples
    --------
    >>> np.base_repr(5)
    '101'
    >>> np.base_repr(7, base=5, padding=3)
    '00012'
    """
    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if base > len(digits):
        raise ValueError("Bases greater than 36 not handled in base_repr.")
    if base < 2:
        # Guard: with base 1 (or less) `num //= base` never decreases and
        # the loop below would never terminate.
        raise ValueError("Bases less than 2 not handled in base_repr.")
    num = abs(number)
    res = []
    # Collect digits least-significant first, then reverse at the end.
    while num:
        res.append(digits[num % base])
        num //= base
    if padding:
        res.append('0' * padding)
    if number < 0:
        res.append('-')
    return ''.join(reversed(res or '0'))
# Pickle-based persistence, kept for backward compatibility (the NumPy
# binary format is no longer based on pickle).
from cPickle import load, loads
_cload = load  # raw cPickle.load, captured before `load` is shadowed below
_file = open   # raw builtin open, used by the `load` wrapper below
def load(file):
    """
    Wrapper around cPickle.load which accepts either a file-like object
    or a filename.

    Note that the NumPy binary format is not based on pickle/cPickle
    anymore.  For details on the preferred way of loading and saving
    files, see `load` and `save`.

    See Also
    --------
    load, save
    """
    source = file
    if isinstance(source, type("")):
        # A path string was given; open it for binary reading.
        source = _file(source, "rb")
    return _cload(source)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with ones.

    Please refer to the documentation for `zeros` for further details.

    See Also
    --------
    zeros, ones_like

    Examples
    --------
    >>> np.ones(5)
    array([ 1.,  1.,  1.,  1.,  1.])
    >>> np.ones((2, 1))
    array([[ 1.],
           [ 1.]])
    """
    out = empty(shape, dtype, order)
    try:
        # fill() is the fastest way to set every element (faster than
        # zeros() followed by += 1).
        out.fill(1)
    except TypeError:
        # Structured dtypes cannot be filled with a bare scalar; build a
        # matching tuple of ones instead.
        out.fill(_maketup(dtype, 1))
    return out
def identity(n, dtype=None):
    """
    Return the identity array: a square array with ones on the main
    diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in the `n` x `n` output.
    dtype : data-type, optional
        Data-type of the output.  Defaults to ``float``.

    Returns
    -------
    out : ndarray
        `n` x `n` array with its main diagonal set to one, and all other
        elements 0.

    Examples
    --------
    >>> np.identity(3)
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    out = zeros((n, n), dtype=dtype)
    # Striding by n+1 through the flat array hits exactly the diagonal.
    out.flat[::n + 1] = 1
    return out
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
    """
    Return True if two arrays are element-wise equal within a tolerance.

    The tolerance values are positive, typically very small numbers.  The
    comparison performed is ``absolute(a - b) <= atol + rtol *
    absolute(b)``, which is not symmetric in `a` and `b`, so
    ``allclose(a, b)`` may differ from ``allclose(b, a)`` in rare cases.
    If either array contains NaN, False is returned.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.

    Returns
    -------
    y : bool
        True if the two arrays are equal within the given tolerance;
        False otherwise.

    See Also
    --------
    all, any, alltrue, sometrue

    Examples
    --------
    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    True
    """
    x = array(a, copy=False, ndmin=1)
    y = array(b, copy=False, ndmin=1)
    xinf = isinf(x)
    # Infinities must appear in exactly the same positions...
    if not all(xinf == isinf(y)):
        return False
    if any(xinf):
        # ...and with the same signs.
        if not all(x[xinf] == y[xinf]):
            return False
        # Only compare the finite entries below.
        x = x[~xinf]
        y = y[~xinf]
    return all(less_equal(absolute(x - y), atol + rtol * absolute(y)))
def array_equal(a1, a2):
    """
    True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    b : bool
        Returns True if the arrays are equal.

    See Also
    --------
    allclose : Returns True if two arrays are element-wise equal within a
               tolerance.
    array_equiv : Returns True if input arrays are shape consistent and
                  all elements equal.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Inputs that cannot be converted to arrays are simply unequal.
        # (Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        return False
    if a1.shape != a2.shape:
        return False
    return bool(logical_and.reduce(equal(a1, a2).ravel()))
def array_equiv(a1, a2):
    """
    Returns True if input arrays are shape consistent and all elements
    equal.

    Shape consistent means they are either the same shape, or one input
    array can be broadcasted to create the same shape as the other one.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    out : bool
        True if equivalent, False otherwise.

    Examples
    --------
    >>> np.array_equiv([1, 2], [1, 2])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Inputs that cannot be converted to arrays are simply unequal.
        # (Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        return False
    try:
        return bool(logical_and.reduce(equal(a1, a2).ravel()))
    except ValueError:
        # Broadcasting failed: the shapes are not consistent.
        return False
# Map from user-facing error-mode names to the ERR_* bit patterns packed
# into the umath error-mask word by seterr().
_errdict = {"ignore":ERR_IGNORE,
            "warn":ERR_WARN,
            "raise":ERR_RAISE,
            "call":ERR_CALL,
            "print":ERR_PRINT,
            "log":ERR_LOG}

# Inverse mapping (bit pattern -> name), used by geterr().
_errdict_rev = {}
for key in _errdict.keys():
    _errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Set treatment for all types of floating-point errors at once:

        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using `seterrcall`.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.

        The default is not to change the current behavior.
    divide : str, optional
        Treatment for division by zero (same choices as `all`).
    over : str, optional
        Treatment for floating-point overflow.
    under : str, optional
        Treatment for floating-point underflow.
    invalid : str, optional
        Treatment for invalid floating-point operation.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See Also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall
    """
    pyvals = umath.geterrobj()
    old = geterr()
    # For each kind not given explicitly, fall back to `all`, and failing
    # that to the current setting.
    settings = {'divide': divide, 'over': over,
                'under': under, 'invalid': invalid}
    for kind in settings:
        if settings[kind] is None:
            settings[kind] = all or old[kind]
    # Pack the four 3-bit mode fields into a single mask word.
    maskvalue = ((_errdict[settings['divide']] << SHIFT_DIVIDEBYZERO) +
                 (_errdict[settings['over']] << SHIFT_OVERFLOW) +
                 (_errdict[settings['under']] << SHIFT_UNDERFLOW) +
                 (_errdict[settings['invalid']] << SHIFT_INVALID))
    pyvals[1] = maskvalue
    umath.seterrobj(pyvals)
    return old
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log",
        "warn", "raise", and "call".  The keys represent possible
        floating-point exceptions, and the values how they are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions
    and treatment options, see `seterr`.
    """
    maskvalue = umath.geterrobj()[1]
    mask = 7  # each error kind occupies three bits of the mask word
    fields = (('divide', SHIFT_DIVIDEBYZERO),
              ('over', SHIFT_OVERFLOW),
              ('under', SHIFT_UNDERFLOW),
              ('invalid', SHIFT_INVALID))
    res = {}
    for name, shift in fields:
        res[name] = _errdict_rev[(maskvalue >> shift) & mask]
    return res
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer.  Must be a multiple of 16, at least 5 and no
        larger than 10e6.

    Returns
    -------
    old : int
        The previous buffer size.

    Raises
    ------
    ValueError
        If `size` is out of range or not a multiple of 16.
    """
    # Py3-compatible raise calls (was `raise ValueError, msg`).
    if size > 10e6:
        raise ValueError("Buffer size, %s, is too big." % size)
    if size < 5:
        raise ValueError("Buffer size, %s, is too small." % size)
    if size % 16 != 0:
        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
    pyvals = umath.geterrobj()
    old = getbufsize()
    pyvals[0] = size
    umath.seterrobj(pyvals)
    return old
def getbufsize():
    """Return the size of the buffer used in ufuncs.

    Returns
    -------
    size : int
        The current ufunc buffer size, read from the umath error object.
    """
    return umath.geterrobj()[0]
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages.  The
    first is to set the error-handler to 'call', using `seterr`, then set
    the function to call using this function.  The second is to set the
    error-handler to 'log', using `seterr`; floating-point errors then
    trigger a call to the 'write' method of the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method logs such messages ('log'-mode).
        The call function takes two arguments: the type of error (one of
        "divide", "over", "under", or "invalid") and the status flag
        byte, where ``flags = divide + 2*over + 4*under + 8*invalid``.
        If an object is provided, its write method should take one
        argument, a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    Raises
    ------
    ValueError
        If `func` is neither callable nor an object with a callable
        ``write`` method.

    See Also
    --------
    seterr, geterr, geterrcall
    """
    if func is not None and not callable(func):
        if not hasattr(func, 'write') or not callable(func.write):
            # Py3-compatible raise call (was `raise ValueError, "..."`).
            raise ValueError("Only callable can be used as callback")
    pyvals = umath.geterrobj()
    old = geterrcall()
    pyvals[2] = func
    umath.seterrobj(pyvals)
    return old
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of
    "divide", "over", "under", or "invalid") is set to 'call' or 'log',
    the function that is called or the log instance that is written to
    is returned.  It was set with `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler, or ``None`` if no handler was set
        through `seterrcall`.

    See Also
    --------
    seterrcall, seterr, geterr
    """
    # The callback is stored in slot 2 of the ufunc error object.
    errobj = umath.geterrobj()
    return errobj[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Using an instance of `errstate` as a context manager allows
    statements in that context to execute with a known error handling
    behavior.  Upon entering the context the error handling is set with
    `seterr` (and `seterrcall` when a ``call`` keyword is supplied);
    upon exiting it is reset to what it was before.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments.  The valid keywords are the possible
        floating-point exceptions.  Each keyword should have a string
        value selecting the treatment for that error, one of
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    For complete documentation of the types of floating-point
    exceptions and treatment options, see `seterr`.
    """
    # (Doctest examples omitted deliberately: they require
    # ``from __future__ import with_statement`` on Python 2.5.)

    def __init__(self, **kwargs):
        # ``call`` is routed to seterrcall rather than seterr; the
        # sentinel distinguishes "not given" from an explicit None.
        self.call = kwargs.pop('call', _Unspecified)
        self.kwargs = kwargs

    def __enter__(self):
        # Remember the previous settings so __exit__ can restore them.
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
def _setdef():
    # Reset the ufunc error object to its defaults: default buffer size
    # (slot 0), default error mask (slot 1), and no error callback
    # (slot 2).
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
    umath.seterrobj(defval)
# set the default values
_setdef()
# Convenience aliases for the IEEE-754 special values exported by the
# extension modules (PINF / NAN).
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
# Canonical numpy boolean scalar singletons.
False_ = bool_(False)
True_ = bool_(True)
# Re-export everything defined in fromnumeric and merge its public
# names into this module's __all__.
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| {
"repo_name": "google/syzygy",
"path": "third_party/numpy/files/numpy/core/numeric.py",
"copies": "16",
"size": "69492",
"license": "apache-2.0",
"hash": 8322618619895369000,
"line_mean": 27.6564948454,
"line_max": 86,
"alpha_frac": 0.569763426,
"autogenerated": false,
"ratio": 3.5589470449656866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Names exported by ``from numpy.core.numeric import *``.  This list is
# extended below with the public names of multiarray, umath and
# numerictypes via extend_all().
__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
           'arange', 'array', 'zeros', 'count_nonzero',
           'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile',
           'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto',
           'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops',
           'can_cast', 'promote_types', 'min_scalar_type', 'result_type',
           'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
           'isfortran', 'empty_like', 'zeros_like', 'ones_like',
           'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot',
           'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
           'array2string', 'get_printoptions', 'set_printoptions',
           'array_repr', 'array_str', 'set_string_function',
           'little_endian', 'require',
           'fromiter', 'array_equal', 'array_equiv',
           'indices', 'fromfunction', 'isclose',
           'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
           'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
           'seterr', 'geterr', 'setbufsize', 'getbufsize',
           'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
           'Inf', 'inf', 'infty', 'Infinity',
           'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
           'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
           'ComplexWarning']
import sys
import warnings
# C-extension modules providing the ndarray type and ufunc machinery;
# their public names are re-exported below via extend_all().
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
# The old buffer-protocol helpers only exist on Python 2.
if sys.version_info[0] < 3:
    __all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
    """
    The warning raised when casting a complex dtype to a real dtype.

    Casting a complex number to a real one discards its imaginary
    part; this behavior may not be what the user actually wants, so a
    warning is issued.
    """
    pass
# ``~`` on integer arrays is bitwise inversion.
bitwise_not = invert
# Clip-mode and limit constants from the C extension.
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
# Core types re-exported at module level.
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
nditer = multiarray.nditer
nested_iters = multiarray.nested_iters
broadcast = multiarray.broadcast
dtype = multiarray.dtype
copyto = multiarray.copyto
# There is no public ufunc type object; derive it from an instance.
ufunc = type(sin)
def zeros_like(a, dtype=None, order='K', subok=True):
    """
    Return an array of zeros with the same shape and type as a given array.

    With default parameters, is equivalent to ``a.copy().fill(0)``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.

        .. versionadded:: 1.6.0
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result.  'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise.  'K' means match the layout of `a` as closely
        as possible.

        .. versionadded:: 1.6.0

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    ones_like, empty_like, zeros, ones, empty
    """
    # Allocate uninitialized storage with the requested layout, then
    # zero-fill it; 'unsafe' casting lets 0 be written into any dtype.
    out = empty_like(a, dtype=dtype, order=order, subok=subok)
    multiarray.copyto(out, 0, casting='unsafe')
    return out
def ones(shape, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with ones.

    Please refer to the documentation for `zeros` for further details.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array.
    dtype : data-type, optional
        The desired data-type for the array.
    order : {'C', 'F'}, optional
        Memory layout of the result.

    Returns
    -------
    out : ndarray
        Array of ones with the given shape, dtype and order.

    See Also
    --------
    zeros, ones_like
    """
    # Start from uninitialized storage and fill with 1; 'unsafe'
    # casting allows the fill value to be written into any dtype.
    out = empty(shape, dtype, order)
    multiarray.copyto(out, 1, casting='unsafe')
    return out
def ones_like(a, dtype=None, order='K', subok=True):
    """
    Return an array of ones with the same shape and type as a given array.

    With default parameters, is equivalent to ``a.copy().fill(1)``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.

        .. versionadded:: 1.6.0
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result.  'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise.  'K' means match the layout of `a` as closely
        as possible.

        .. versionadded:: 1.6.0

    Returns
    -------
    out : ndarray
        Array of ones with the same shape and type as `a`.

    See Also
    --------
    zeros_like, empty_like, zeros, ones, empty
    """
    # Same pattern as zeros_like, with a fill value of 1.
    out = empty_like(a, dtype=dtype, order=order, subok=subok)
    multiarray.copyto(out, 1, casting='unsafe')
    return out
def extend_all(module):
    """
    Append to this module's ``__all__`` every public name exported by
    `module` that is not already listed.

    If `module` defines ``__all__``, that list is used; otherwise every
    attribute whose name does not start with an underscore is taken.
    """
    # IDIOM FIX: the original built a dict with dummy values purely for
    # membership testing; a set expresses the intent directly.
    existing = set(__all__)
    try:
        mall = getattr(module, '__all__')
    except AttributeError:
        mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
    for a in mall:
        if a not in existing:
            __all__.append(a)
# Merge the public names of the extension modules into __all__.
extend_all(umath)
extend_all(numerictypes)
# ``newaxis`` is just an alias for None used in indexing.
newaxis = None
# Re-export the C-implemented array constructors and utilities.
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
count_nonzero = multiarray.count_nonzero
empty = multiarray.empty
empty_like = multiarray.empty_like
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
# Python-2-only buffer helpers.
if sys.version_info[0] < 3:
    newbuffer = multiarray.newbuffer
    getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
promote_types = multiarray.promote_types
min_scalar_type = multiarray.min_scalar_type
result_type = multiarray.result_type
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
einsum = multiarray.einsum
def asarray(a, dtype=None, order=None):
    """
    Convert the input to an array.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array:
        scalars, lists, tuples, nested sequences, or ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('F' for
        FORTRAN) memory representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the
        input is already an ndarray; if `a` is a subclass of ndarray,
        a base class ndarray is returned.

    See Also
    --------
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to column-major memory order.
    asarray_chkfinite : Also checks input for NaNs and Infs.
    fromiter : Create an array from an iterator.
    """
    # copy=False makes this a no-op when `a` already satisfies the
    # requested dtype/order; subok defaults to False, so ndarray
    # subclasses are downcast to plain ndarray.
    return array(a, dtype=dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray, but pass ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array:
        scalars, lists, tuples, nested sequences, or ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('F') memory
        representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`.  If `a` is an ndarray or a
        subclass of ndarray, it is returned as-is and no copy is
        performed.

    See Also
    --------
    asarray : Similar function which always returns ndarrays.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to column-major memory order.
    fromiter : Create an array from an iterator.
    """
    # subok=True is the only difference from asarray: subclasses such
    # as matrix are returned unchanged.
    return array(a, dtype=dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type
        `dtype` if specified.

    See Also
    --------
    asfortranarray : Convert input to column-major memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.
    """
    # ndmin=1 guarantees at least one dimension; copy=False avoids
    # copying input that is already C-contiguous with matching dtype.
    return array(a, dtype=dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
    """
    Return an array laid out in Fortran order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.
    """
    # Mirror image of ascontiguousarray: order='F' instead of 'C'.
    return array(a, dtype=dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    This function is useful to be sure that an array with the correct
    flags is returned for passing to compiled code (perhaps through
    ctypes).

    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying
        array.
    dtype : data-type
        The required data-type (the default data-type is float64).
    requirements : str or list of str
        The requirements list can be any of the following

        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A')      - ensure a data-type aligned array
        * 'WRITEABLE' ('W')    - ensure a writable array
        * 'OWNDATA' ('O')      - ensure an array that owns its own data
        * 'ENSUREARRAY' ('E')  - ensure a base-class ndarray

    Returns
    -------
    out : ndarray
        Array with the requested flags, copied only if needed.

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfortranarray,
    ndarray.flags

    Notes
    -----
    The returned array will be guaranteed to have the listed
    requirements by making a copy if needed.
    """
    if requirements is None:
        requirements = []
    else:
        requirements = [x.upper() for x in requirements]
    if not requirements:
        return asanyarray(a, dtype=dtype)
    # 'E'/'ENSUREARRAY' forces a base-class ndarray (no subclasses).
    if 'ENSUREARRAY' in requirements or 'E' in requirements:
        subok = False
    else:
        subok = True
    arr = array(a, dtype=dtype, copy=False, subok=subok)
    copychar = 'A'
    if 'FORTRAN' in requirements or \
       'F_CONTIGUOUS' in requirements or \
       'F' in requirements:
        copychar = 'F'
    elif 'CONTIGUOUS' in requirements or \
         'C_CONTIGUOUS' in requirements or \
         'C' in requirements:
        copychar = 'C'
    for prop in requirements:
        # BUG FIX: 'E'/'ENSUREARRAY' is not an ndarray flag key, so the
        # original ``arr.flags[prop]`` lookup raised KeyError whenever
        # it was requested.  It only controls `subok` above; skip it in
        # the flag check.
        if prop in ('E', 'ENSUREARRAY'):
            continue
        if not arr.flags[prop]:
            arr = arr.copy(copychar)
            break
    return arr
def isfortran(a):
    """
    Returns True if array is arranged in Fortran-order in memory
    and dimension > 1.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    isfortran : bool
        True when `a` is Fortran contiguous but not C contiguous.

    Notes
    -----
    1-D arrays are both C and Fortran contiguous, so they always
    evaluate as False.  The transpose of a C-ordered 2-D array is
    Fortran-ordered and evaluates as True.
    """
    # 'fnc' == "Fortran contiguous, not C contiguous".
    flags = a.flags
    return flags.fnc
def argwhere(a):
    """
    Find the indices of array elements that are non-zero, grouped by element.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    index_array : ndarray
        Indices of elements that are non-zero, one row per element.

    See Also
    --------
    where, nonzero

    Notes
    -----
    ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
    The output of ``argwhere`` is not suitable for indexing arrays;
    for that purpose use ``where(a)`` instead.
    """
    # nonzero() yields one index array per dimension; transposing
    # regroups them into one (index-tuple) row per non-zero element.
    per_axis_indices = asanyarray(a).nonzero()
    return transpose(per_axis_indices)
def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of a.

    This is equivalent to ``a.ravel().nonzero()[0]``.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    res : ndarray
        Output array, containing the indices of the elements of
        ``a.ravel()`` that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input.
    ravel : Return a 1-D array containing the elements of the input.
    """
    # Flatten first; nonzero() on a 1-D array returns a 1-tuple of
    # index arrays, so take element 0.
    flattened = a.ravel()
    return flattened.nonzero()[0]
# Map the first letter of a correlate/convolve mode name
# ('valid'/'same'/'full') to the integer code understood by the C
# implementation in multiarray.
_mode_from_name_dict = {'v': 0,
                        's' : 1,
                        'f' : 2}
def _mode_from_name(mode):
    """Translate a mode name to its integer code; pass integers through."""
    # Names are matched case-insensitively by their first letter only.
    if isinstance(mode, basestring):
        key = mode.lower()[0]
        return _mode_from_name_dict[key]
    return mode
def correlate(a, v, mode='valid', old_behavior=False):
    """
    Cross-correlation of two 1-dimensional sequences.

    This function computes the correlation as generally defined in
    signal processing texts::

        z[k] = sum_n a[n] * conj(v[n+k])

    with a and v sequences being zero-padded where necessary and conj
    being the conjugate.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `convolve` docstring.  Note that the default is
        'valid', unlike `convolve`, which uses 'full'.
    old_behavior : bool
        If True, uses the old behavior from Numeric
        (``correlate(a, v) == correlate(v, a)``, and the conjugate is
        not taken for complex arrays).  If False, uses the conventional
        signal processing definition.

    See Also
    --------
    convolve : Discrete, linear convolution of two one-dimensional
        sequences.
    """
    mode = _mode_from_name(mode)
    # Conventional definition first; the deprecated Numeric behavior is
    # kept behind a flag (see the mailing-list thread at
    # http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630).
    if not old_behavior:
        return multiarray.correlate2(a, v, mode)
    warnings.warn("""
The old behavior of correlate was deprecated for 1.4.0, and will be completely removed
for NumPy 2.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
                  DeprecationWarning)
    return multiarray.correlate(a, v, mode)
def convolve(a, v, mode='full'):
    """
    Returns the discrete, linear convolution of two one-dimensional
    sequences.

    The convolution operator is often seen in signal processing, where
    it models the effect of a linear time-invariant system on a signal.
    In probability theory, the sum of two independent random variables
    is distributed according to the convolution of their individual
    distributions.

    Parameters
    ----------
    a : (N,) array_like
        First one-dimensional input array.
    v : (M,) array_like
        Second one-dimensional input array.
    mode : {'full', 'valid', 'same'}, optional
        'full' (default) returns the convolution at each point of
        overlap, with an output shape of (N+M-1,); boundary effects may
        be seen at the end-points.  'same' returns output of length
        ``max(M, N)``; boundary effects are still visible.  'valid'
        returns output of length ``max(M, N) - min(M, N) + 1``; the
        product is only given where the signals overlap completely.

    Returns
    -------
    out : ndarray
        Discrete, linear convolution of `a` and `v`.

    See Also
    --------
    scipy.signal.fftconvolve : Convolve two arrays using the FFT.
    scipy.linalg.toeplitz : Used to construct the convolution operator.

    Notes
    -----
    The discrete convolution operation is defined as

    .. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m]
    """
    a, v = array(a, ndmin=1), array(v, ndmin=1)
    # The C correlate routine requires the first argument to be the
    # longer sequence; convolution is symmetric, so swapping is safe.
    if len(v) > len(a):
        a, v = v, a
    if len(a) == 0 :
        raise ValueError('a cannot be empty')
    if len(v) == 0 :
        raise ValueError('v cannot be empty')
    # Convolution is correlation with the second sequence reversed.
    mode = _mode_from_name(mode)
    return multiarray.correlate(a, v[::-1], mode)
def outer(a, b):
    """
    Compute the outer product of two vectors.

    Given two vectors, ``a = [a0, a1, ..., aM]`` and
    ``b = [b0, b1, ..., bN]``, the outer product is::

        [[a0*b0  a0*b1 ... a0*bN ]
         [a1*b0    .
         [ ...          .
         [aM*b0            aM*bN ]]

    Parameters
    ----------
    a, b : array_like, shape (M,), (N,)
        First and second input vectors.  Inputs are flattened if they
        are not already 1-dimensional.

    Returns
    -------
    out : ndarray, shape (M, N)
        ``out[i, j] = a[i] * b[j]``

    See also
    --------
    inner, einsum

    References
    ----------
    .. [1] G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
           ed., Baltimore, MD, Johns Hopkins University Press, 1996,
           pg. 8.
    """
    a = asarray(a)
    b = asarray(b)
    # Flatten both inputs, then broadcast a column against a row to get
    # the full (M, N) product table.
    col = a.ravel()[:, newaxis]
    row = b.ravel()[newaxis, :]
    return col * row
# try to import blas optimized dot if available
try:
    # importing this changes the dot function for basic 4 types
    # to blas-optimized versions.
    from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
    # Pure C (non-BLAS) fallbacks.
    # docstrings are in add_newdocs.py
    inner = multiarray.inner
    dot = multiarray.dot
    def vdot(a, b):
        # vdot flattens both inputs and conjugates the first.
        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
    def alterdot():
        # No BLAS available: switching implementations is a no-op.
        pass
    def restoredot():
        pass
def tensordot(a, b, axes=2):
    """
    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors (arrays of dimension greater than or equal to
    one), ``a`` and ``b``, and an array_like object containing two
    array_like objects, ``(a_axes, b_axes)``, sum the products of
    ``a``'s and ``b``'s elements (components) over the axes specified
    by ``a_axes`` and ``b_axes``.  The third argument can be a single
    non-negative integer_like scalar, ``N``; if it is such, then the
    last ``N`` dimensions of ``a`` and the first ``N`` dimensions of
    ``b`` are summed over.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : variable type
        * integer_like scalar: number of axes to sum over (applies to
          both arrays); or
        * array_like, shape = (2,), both elements array_like: axes to
          be summed over, first sequence applying to ``a``, second to
          ``b``.

    Returns
    -------
    out : ndarray
        The tensor dot product, with shape given by the non-summed
        axes of `a` followed by the non-summed axes of `b`.

    Raises
    ------
    ValueError
        If the paired axes do not have matching lengths.

    See Also
    --------
    dot, einsum

    Notes
    -----
    When there is more than one axis to sum over - and they are not
    the last (first) axes of ``a`` (``b``) - the argument ``axes``
    should consist of two sequences of the same length, with the first
    axis to sum over given first in both sequences, the second axis
    second, and so forth.
    """
    # A scalar `axes` means "sum over the last `axes` dims of a and the
    # first `axes` dims of b"; otherwise it is a pair of axis specs.
    try:
        iter(axes)
    except TypeError:
        # BUG FIX: this was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; iter() raises TypeError for
        # non-iterables, which is the only case intended here.
        axes_a = range(-axes,0)
        axes_b = range(0,axes)
    else:
        axes_a, axes_b = axes
    # Normalize both axis specs to lists and record their lengths;
    # a bare integer axis becomes a one-element list.
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1
    a, b = asarray(a), asarray(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)
    equal = True
    if (na != nb): equal = False
    else:
        # Check the paired axes have matching lengths, normalizing
        # negative axis numbers along the way.
        for k in xrange(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")
    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]
    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]
    # Reduce the problem to a single 2-D matrix product, then restore
    # the kept dimensions of both operands.
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements shifted beyond the last position wrap around to the first.

    Parameters
    ----------
    a : array_like
        Input array.
    shift : int
        The number of places by which elements are shifted.
    axis : int, optional
        The axis along which elements are shifted.  By default, the array
        is flattened before shifting, after which the original shape is
        restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    See Also
    --------
    rollaxis : Roll the specified axis backwards, until it lies in a
               given position.

    Examples
    --------
    >>> x = np.arange(10)
    >>> np.roll(x, 2)
    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
    >>> x2 = np.reshape(x, (2,5))
    >>> np.roll(x2, 1, axis=1)
    array([[4, 0, 1, 2, 3],
           [9, 5, 6, 7, 8]])
    """
    a = asanyarray(a)
    # Work along the flattened array when no axis was given.
    if axis is None:
        count = a.size
    else:
        count = a.shape[axis]
    shift %= count
    # Index vector that picks the wrapped tail first, then the head.
    picks = concatenate((arange(count - shift, count), arange(count - shift)))
    rolled = a.take(picks, axis)
    if axis is None:
        # take() flattened the data; restore the original shape.
        rolled = rolled.reshape(a.shape)
    return rolled
def rollaxis(a, axis, start=0):
    """
    Roll the specified axis backwards, until it lies in a given position.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        The axis to roll backwards.  The positions of the other axes do not
        change relative to one another.
    start : int, optional
        The axis is rolled until it lies before this position.  The default,
        0, results in a "complete" roll.

    Returns
    -------
    res : ndarray
        Output array (a view obtained by transposing `a`).

    Raises
    ------
    ValueError
        If `axis` or `start` is out of range after normalizing negative
        values.

    See Also
    --------
    roll : Roll the elements of an array by a number of positions along a
        given axis.

    Examples
    --------
    >>> a = np.ones((3,4,5,6))
    >>> np.rollaxis(a, 3, 1).shape
    (3, 6, 4, 5)
    >>> np.rollaxis(a, 2).shape
    (5, 3, 4, 6)
    >>> np.rollaxis(a, 1, 4).shape
    (3, 5, 6, 4)
    """
    n = a.ndim
    # Normalize negative indices before validating.
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n+1):
        raise ValueError(msg % ('start', start, n+1))
    if (axis < start):
        # Removing `axis` from the permutation shifts `start` down by one.
        start -= 1
    if axis == start:
        return a
    # list() so that .remove()/.insert() work on both Python 2 (where
    # range() is already a list) and Python 3 (where it is not).
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
    # Thin backward-compatibility wrapper: equivalent to rollaxis(a, axis, 0).
    # Kept because scipy historically imported this private helper.
    return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """
    Return the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector
    perpendicular to both `a` and `b`.  The vectors are taken from the
    last axis of `a` and `b` by default, and these axes may have length
    2 or 3.  A length-2 vector is treated as having a zero third
    component; if both inputs are length 2, only the z-component of the
    cross product is returned.

    Parameters
    ----------
    a : array_like
        Components of the first vector(s).
    b : array_like
        Components of the second vector(s).
    axisa : int, optional
        Axis of `a` that defines the vector(s).  By default, the last axis.
    axisb : int, optional
        Axis of `b` that defines the vector(s).  By default, the last axis.
    axisc : int, optional
        Axis of the result containing the cross product vector(s).  By
        default, the last axis.
    axis : int, optional
        If defined, the axis of `a`, `b` and the result that defines the
        vector(s) and cross product(s).  Overrides `axisa`, `axisb` and
        `axisc`.

    Returns
    -------
    c : ndarray
        Vector cross product(s).

    Raises
    ------
    ValueError
        When the dimension of the vector(s) in `a` and/or `b` does not
        equal 2 or 3.

    See Also
    --------
    inner : Inner product
    outer : Outer product.

    Examples
    --------
    >>> np.cross([1, 2, 3], [4, 5, 6])
    array([-3,  6, -3])
    >>> np.cross([1, 2], [4, 5, 6])
    array([12, -6, -3])
    >>> np.cross([1, 2], [4, 5])
    -3
    """
    if axis is not None:
        axisa, axisb, axisc = (axis,) * 3
    # Move the vector components to the leading axis for easy indexing.
    a = asarray(a).swapaxes(axisa, 0)
    b = asarray(b).swapaxes(axisb, 0)
    msg = "incompatible dimensions for cross product\n"\
          "(dimension must be 2 or 3)"
    if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
        raise ValueError(msg)
    if a.shape[0] == 2 and b.shape[0] == 2:
        # Both 2-D vectors: only the z-component exists.
        z_only = a[0]*b[1] - a[1]*b[0]
        if z_only.ndim == 0:
            return z_only
        return z_only.swapaxes(0, axisc)
    if a.shape[0] == 2:
        # a has an implicit zero z-component (b is length 3 here).
        comp_x = a[1]*b[2]
        comp_y = -a[0]*b[2]
        comp_z = a[0]*b[1] - a[1]*b[0]
    elif b.shape[0] == 2:
        # b has an implicit zero z-component (a is length 3 here).
        comp_x = -a[2]*b[1]
        comp_y = a[2]*b[0]
        comp_z = a[0]*b[1] - a[1]*b[0]
    else:
        # Full 3-D cross product.
        comp_x = a[1]*b[2] - a[2]*b[1]
        comp_y = a[2]*b[0] - a[0]*b[2]
        comp_z = a[0]*b[1] - a[1]*b[0]
    result = array([comp_x, comp_y, comp_z])
    if result.ndim == 1:
        return result
    return result.swapaxes(0, axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
# Scalar types whose dtype can be inferred from the printed values; arrays
# of these types skip the "dtype=..." suffix in array_repr() below.
_typelessdata = [int_, float_, complex_]
# Include the C int / long long scalar types only when they alias the
# native Python int type on this platform.
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span.  Newline
        characters split the string appropriately after array elements.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8), which can be altered using
        `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'
    """
    if arr.size > 0 or arr.shape == (0,):
        body = array2string(arr, max_line_width, precision, suppress_small,
                            ', ', "array(")
    else:
        # Zero-size array with a shape other than (0,): print the shape
        # explicitly since the element list alone would be ambiguous.
        body = "[], shape=%s" % (repr(arr.shape),)
    if arr.__class__ is ndarray:
        cls_name = "array"
    else:
        # Subclasses print under their own class name.
        cls_name = arr.__class__.__name__
    if (arr.dtype.type in _typelessdata) and arr.size > 0:
        # The dtype is implied by the printed values; omit the suffix.
        return "%s(%s)" % (cls_name, body)
    typename = arr.dtype.name
    # Quote the type name when it is not a plain identifier (e.g. "complex").
    if typename and not (typename[0].isalpha() and typename.isalnum()):
        typename = "'%s'" % typename
    pad = ''
    if issubclass(arr.dtype.type, flexible):
        if arr.dtype.names:
            typename = "%s" % str(arr.dtype)
        else:
            typename = "'%s'" % str(arr.dtype)
        # Flexible dtypes can be long; break the dtype onto its own line.
        pad = '\n' + ' '*len("array(")
    return cls_name + "(%s, %sdtype=%s)" % (body, pad, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data in the array is returned as a single string.  Unlike
    `array_repr`, no information about the array class or dtype is
    included.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8), which can be altered using
        `set_printoptions`.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    # Delegate to array2string: space separator, no prefix, and str-style
    # formatting for rank-0 arrays.
    return array2string(a, max_line_width, precision,
                        suppress_small, ' ', "", str)
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function used to pretty print arrays.  It should accept a single
        array argument and return a string.  If None, the formatter is
        reset to the default NumPy function.
    repr : bool, optional
        If True (default), sets the ``__repr__`` formatter; if False,
        sets the ``__str__`` formatter.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> def pprint(arr):
    ...     return 'HA! - What are you going to do now?'
    ...
    >>> np.set_string_function(pprint)
    >>> np.arange(10)
    HA! - What are you going to do now?
    >>> np.set_string_function(None)
    >>> np.arange(10)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    if f is not None:
        return multiarray.set_string_function(f, repr)
    # f is None: restore the default formatter for the requested slot.
    if repr:
        return multiarray.set_string_function(array_repr, 1)
    return multiarray.set_string_function(array_str, 0)
# Install the default str() and repr() array formatters at import time.
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
# True when the native byte order of this platform is little-endian.
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
    """
    Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : dtype, optional
        Data type of the result.

    Returns
    -------
    grid : ndarray
        The array of grid indices,
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.

    See Also
    --------
    mgrid, meshgrid

    Notes
    -----
    ``grid[k]`` contains the N-D array of indices along the ``k``-th axis:
    ``grid[k, i0, i1, ..., iN-1] = ik``.

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]                 # row indices
    array([[0, 0, 0],
           [1, 1, 1]])
    >>> grid[1]                 # column indices
    array([[0, 1, 2],
           [0, 1, 2]])
    """
    dimensions = tuple(dimensions)
    rank = len(dimensions)
    if rank == 0:
        # Degenerate case: no axes at all.
        return array([], dtype=dtype)
    out = empty((rank,) + dimensions, dtype=dtype)
    for axis, dim in enumerate(dimensions):
        # Shape with `dim` on this axis and 1 everywhere else, so that
        # assignment broadcasts the arange across all other axes.
        shape = (1,)*axis + (dim,) + (1,)*(rank - axis - 1)
        out[axis] = arange(dim, dtype=dtype).reshape(shape)
    return out
def fromfunction(function, shape, **kwargs):
    """
    Construct an array by executing a function over each coordinate.

    The resulting array therefore has a value ``fn(x, y, z)`` at
    coordinate ``(x, y, z)``.

    Parameters
    ----------
    function : callable
        Called with N coordinate arrays, where N is the rank of `shape`.
        Each array holds the coordinates varying along one axis.
    shape : (N,) tuple of ints
        Shape of the output array, which also determines the shape of
        the coordinate arrays passed to `function`.
    dtype : data-type, optional
        Data-type of the coordinate arrays passed to `function`.
        By default, `dtype` is float.

    Returns
    -------
    fromfunction : any
        The result of calling `function`, returned unchanged; its shape
        is entirely determined by `function`.

    See Also
    --------
    indices, meshgrid

    Notes
    -----
    Keywords other than `dtype` are passed through to `function`.

    Examples
    --------
    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4]])
    """
    dtype = kwargs.pop('dtype', float)
    # One coordinate array per axis, each of the requested shape.
    coords = indices(shape, dtype=dtype)
    return function(*coords, **kwargs)
def isscalar(num):
    """
    Returns True if the type of `num` is a scalar type.

    Parameters
    ----------
    num : any
        Input argument, can be of any type and shape.

    Returns
    -------
    val : bool
        True if `num` is a scalar type, False if it is not.

    Examples
    --------
    >>> np.isscalar(3.1)
    True
    >>> np.isscalar([3.1])
    False
    """
    # NumPy scalars all derive from `generic`; anything else is checked
    # against the table of recognized Python scalar types.
    return isinstance(num, generic) or type(num) in ScalarType
# Hex-digit -> 4-bit binary string lookup table used by binary_repr().
# The 'L' entry maps the long-integer suffix that Python 2's hex() appends
# to the empty string so it is silently dropped.
_lkup = {
    '0':'0000',
    '1':'0001',
    '2':'0010',
    '3':'0011',
    '4':'0100',
    '5':'0101',
    '6':'0110',
    '7':'0111',
    '8':'1000',
    '9':'1001',
    'a':'1010',
    'b':'1011',
    'c':'1100',
    'd':'1101',
    'e':'1110',
    'f':'1111',
    'A':'1010',
    'B':'1011',
    'C':'1100',
    'D':'1101',
    'E':'1110',
    'F':'1111',
    'L':''}
def binary_repr(num, width=None):
    """
    Return the binary representation of the input number as a string.

    For negative numbers, if width is not given, a minus sign is added to
    the front.  If width is given, the two's complement of the number is
    returned, with respect to that width.

    Parameters
    ----------
    num : int
        Only an integer decimal number can be used.
    width : int, optional
        The length of the returned string if `num` is positive, the length
        of the two's complement if `num` is negative.

    Returns
    -------
    bin : str
        Binary representation of `num` or two's complement of `num`.

    See Also
    --------
    base_repr: Return a string representation of a number in the given base
               system.

    Examples
    --------
    >>> np.binary_repr(3)
    '11'
    >>> np.binary_repr(-3)
    '-11'
    >>> np.binary_repr(3, width=4)
    '0011'
    >>> np.binary_repr(-3, width=4)
    '1101'
    """
    sign = ''
    if num < 0:
        if width is None:
            sign = '-'
            num = -num
        else:
            # Two's complement of the absolute value with respect to width.
            num = 2**width + num
    elif num == 0:
        return '0'*(width or 1)
    # Built-in bin() (Python 2.6+) replaces the old hex()-plus-lookup-table
    # translation: identical output for non-negative ints, no leading
    # zeros, and no dependency on the module-level _lkup dict.  Also
    # avoids shadowing the builtin name 'bin' with a local.
    bits = bin(num)[2:]
    if width is not None:
        bits = bits.zfill(width)
    return sign + bits
def base_repr(number, base=2, padding=0):
    """
    Return a string representation of a number in the given base system.

    Parameters
    ----------
    number : int
        The value to convert.
    base : int, optional
        Convert `number` to the `base` number system.  The valid range is
        2-36, the default value is 2.
    padding : int, optional
        Number of zeros padded on the left.  Default is 0 (no padding).

    Returns
    -------
    out : str
        String representation of `number` in `base` system.

    See Also
    --------
    binary_repr : Faster version of `base_repr` for base 2.

    Examples
    --------
    >>> np.base_repr(5)
    '101'
    >>> np.base_repr(7, base=5, padding=3)
    '00012'
    >>> np.base_repr(10, base=16)
    'A'
    """
    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if base > len(digits):
        raise ValueError("Bases greater than 36 not handled in base_repr.")
    # Collect digits least-significant first, then reverse at the end.
    remaining = abs(number)
    chunks = []
    while remaining:
        remaining, digit = divmod(remaining, base)
        chunks.append(digits[digit])
    if padding:
        chunks.append('0' * padding)
    if number < 0:
        chunks.append('-')
    # An empty digit list (number == 0, no padding/sign) prints as '0'.
    return ''.join(reversed(chunks or '0'))
from cPickle import load, loads
# Private aliases to the real pickle loader and the builtin file opener,
# so the load() wrapper defined below can shadow the name 'load' at
# module level while still reaching the originals.
_cload = load
_file = open
def load(file):
    """
    Wrapper around cPickle.load which accepts either a file-like object or
    a filename.

    Note that the NumPy binary format is not based on pickle/cPickle
    anymore.  For details on the preferred way of loading and saving
    files, see `load` and `save`.

    See Also
    --------
    load, save
    """
    source = file
    if isinstance(source, type("")):
        # A string argument is a path: open it in binary mode.
        source = _file(source, "rb")
    return _cload(source)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def identity(n, dtype=None):
    """
    Return the identity array.

    The identity array is a square array with ones on the main diagonal.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in `n` x `n` output.
    dtype : data-type, optional
        Data-type of the output.  Defaults to ``float``.

    Returns
    -------
    out : ndarray
        `n` x `n` array with its main diagonal set to one,
        and all other elements 0.

    Examples
    --------
    >>> np.identity(3)
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    # eye() with its default diagonal offset already yields exactly the
    # identity matrix; import locally to avoid a circular module import.
    from numpy import eye
    return eye(n, dtype=dtype)
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    The tolerance values are positive, typically very small numbers.  The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    If either array contains one or more NaNs, False is returned.
    Infs are treated as equal if they are in the same place and of the
    same sign in both arrays.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).

    Returns
    -------
    allclose : bool
        Returns True if the two arrays are equal within the given
        tolerance; False otherwise.

    See Also
    --------
    all, any, alltrue, sometrue

    Notes
    -----
    `allclose` returns True when, element-wise,

        absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    The equation is not symmetric in `a` and `b`, so ``allclose(a, b)``
    might differ from ``allclose(b, a)`` in some rare cases.

    Examples
    --------
    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
    False
    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    True
    """
    x = array(a, copy=False, ndmin=1)
    y = array(b, copy=False, ndmin=1)
    x_inf = isinf(x)
    y_inf = isinf(y)
    if x_inf.any() or y_inf.any():
        # Infinities must occur at the same positions ...
        if not (x_inf == y_inf).all():
            return False
        # ... and with matching signs.
        if not (x[x_inf] == y[x_inf]).all():
            return False
        # Compare only the finite remainder against the tolerance.
        x = x[~x_inf]
        y = y[~x_inf]
    # Any NaN makes a comparison False, so the result is False as desired.
    return less_equal(abs(x - y), atol + rtol * abs(y)).all()
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Returns a boolean array where two arrays are element-wise equal within a
    tolerance.
    The tolerance values are positive, typically very small numbers.  The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.
    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.
    Returns
    -------
    y : array_like
        Returns a boolean array of where `a` and `b` are equal within the
        given tolerance. If both `a` and `b` are scalars, returns a single
        boolean value.
    See Also
    --------
    allclose
    Notes
    -----
    .. versionadded:: 1.7.0
    For finite values, isclose uses the following equation to test whether
    two floating point values are equivalent.
     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
    The above equation is not symmetric in `a` and `b`, so that
    `isclose(a, b)` might be different from `isclose(b, a)` in
    some rare cases.
    Examples
    --------
    >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
    array([True, False])
    >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
    array([True, True])
    >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    array([True, True])
    """
    def within_tol(x, y, atol, rtol):
        # Silence "invalid value" FP warnings: the subtraction/comparison
        # can legitimately encounter NaN here.
        err = seterr(invalid='ignore')
        try:
            result = less_equal(abs(x-y), atol + rtol * abs(y))
        finally:
            # Always restore the caller's error state.
            seterr(**err)
        if isscalar(a) and isscalar(b):
            # Both inputs were scalars: collapse the length-1 array result
            # to a plain Python bool.
            result = bool(result)
        return result
    x = array(a, copy=False, subok=True, ndmin=1)
    y = array(b, copy=False, subok=True, ndmin=1)
    xfin = isfinite(x)
    yfin = isfinite(y)
    if all(xfin) and all(yfin):
        # Fast path: everything finite, compare in one shot.
        return within_tol(x, y, atol, rtol)
    else:
        finite = xfin & yfin
        cond = zeros_like(finite, subok=True)
        # Because we're using boolean indexing, x & y must be the same shape.
        # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
        # lib.stride_tricks, though, so we can't import it here.
        x = x * ones_like(cond)
        y = y * ones_like(cond)
        # Avoid subtraction with infinite/nan values...
        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
        # Check for equality of infinite values...
        cond[~finite] = (x[~finite] == y[~finite])
        if equal_nan:
            # Make NaN == NaN
            cond[isnan(x) & isnan(y)] = True
        return cond
def array_equal(a1, a2):
    """
    True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    b : bool
        Returns True if the arrays are equal.

    See Also
    --------
    allclose: Returns True if two arrays are element-wise equal within a
              tolerance.
    array_equiv: Returns True if input arrays are shape consistent and all
                 elements equal.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    >>> np.array_equal([1, 2], [1, 4])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate; anything unconvertible to an array is simply unequal.
        return False
    if a1.shape != a2.shape:
        return False
    return bool(equal(a1, a2).all())
def array_equiv(a1, a2):
    """
    Returns True if input arrays are shape consistent and all elements equal.

    Shape consistent means they are either the same shape, or one input
    array can be broadcasted to create the same shape as the other one.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    out : bool
        True if equivalent, False otherwise.

    Examples
    --------
    >>> np.array_equiv([1, 2], [1, 2])
    True
    >>> np.array_equiv([1, 2], [1, 3])
    False
    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate; anything unconvertible to an array is not equivalent.
        return False
    try:
        return bool(equal(a1, a2).all())
    except ValueError:
        # equal() raises ValueError when the shapes cannot broadcast.
        return False
# Map error-policy names to the numeric flag constants understood by the
# umath error object (see seterr()/geterr() below).
_errdict = {"ignore":ERR_IGNORE,
            "warn":ERR_WARN,
            "raise":ERR_RAISE,
            "call":ERR_CALL,
            "print":ERR_PRINT,
            "log":ERR_LOG}
# Inverse mapping (flag value -> policy name), used by geterr().
_errdict_rev = {}
for key in _errdict.keys():
    _errdict_rev[_errdict[key]] = key
# Don't leak the loop variable into the module namespace.
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Set treatment for all types of floating-point errors at once:
        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using the `seterrcall` function.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.
        The default is not to change the current behavior.
    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for division by zero.
    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point overflow.
    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point underflow.
    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for invalid floating-point operation.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall, errstate

    Examples
    --------
    >>> old_settings = np.seterr(all='ignore')  #seterr to known value
    >>> np.seterr(over='raise')
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
     'under': 'ignore'}
    """
    pyvals = umath.geterrobj()
    old = geterr()
    # Categories not given explicitly fall back to `all`, or, failing
    # that, keep their current setting.
    if divide is None:
        divide = all or old['divide']
    if over is None:
        over = all or old['over']
    if under is None:
        under = all or old['under']
    if invalid is None:
        invalid = all or old['invalid']
    # Pack the four policies into one mask, each at its own bit offset.
    maskvalue = 0
    for policy, shift in ((divide, SHIFT_DIVIDEBYZERO),
                          (over, SHIFT_OVERFLOW),
                          (under, SHIFT_UNDERFLOW),
                          (invalid, SHIFT_INVALID)):
        maskvalue += _errdict[policy] << shift
    pyvals[1] = maskvalue
    umath.seterrobj(pyvals)
    return old
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log",
        "warn", "raise", and "call".  The keys represent possible
        floating-point exceptions, and the values define how these
        exceptions are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions
    and treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterr()
    {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
    'under': 'ignore'}
    """
    maskvalue = umath.geterrobj()[1]
    mask = 7  # three bits encode each category's policy
    res = {}
    # Decode each category from its bit offset in the packed mask.
    for name, shift in (('divide', SHIFT_DIVIDEBYZERO),
                        ('over', SHIFT_OVERFLOW),
                        ('under', SHIFT_UNDERFLOW),
                        ('invalid', SHIFT_INVALID)):
        res[name] = _errdict_rev[(maskvalue >> shift) & mask]
    return res
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer.  Must be a multiple of 16 in the range
        [16, 10e6].

    Returns
    -------
    old : int
        The previous buffer size.
    """
    # Validate before touching the umath error object.
    if size > 10e6:
        raise ValueError("Buffer size, %s, is too big." % size)
    if size < 5:
        raise ValueError("Buffer size, %s, is too small." % size)
    if size % 16 != 0:
        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
    pyvals = umath.geterrobj()
    previous = getbufsize()
    pyvals[0] = size
    umath.seterrobj(pyvals)
    return previous
def getbufsize():
    """
    Return the size of the buffer used in ufuncs.
    Returns
    -------
    getbufsize : int
        Size of ufunc buffer in bytes.
    """
    # Slot 0 of the umath error object holds the buffer size
    # (see setbufsize above, which writes pyvals[0]).
    return umath.geterrobj()[0]
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages.  The
    first is to set the error-handler to 'call', using `seterr`, and then
    set the function to call using this function.  The second is to set
    the error-handler to 'log': floating-point errors then trigger a call
    to the 'write' method of the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message
        ('log'-mode).  The call function takes two arguments: the type of
        error (one of "divide", "over", "under", or "invalid") and the
        status flag, where ``flags = divide + 2*over + 4*under +
        8*invalid``.  If an object is provided, its write method should
        take one argument, a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall

    Examples
    --------
    >>> def err_handler(type, flag):
    ...     print "Floating point error (%s), with flag %s" % (type, flag)
    ...
    >>> saved_handler = np.seterrcall(err_handler)
    >>> save_err = np.seterr(all='call')
    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([ Inf,  Inf,  Inf])
    """
    # Accept either a callable or a log-like object with a callable
    # .write() method; reject everything else.
    log_like = hasattr(func, 'write') and callable(func.write)
    if func is not None and not callable(func) and not log_like:
        raise ValueError("Only callable can be used as callback")
    pyvals = umath.geterrobj()
    previous = geterrcall()
    pyvals[2] = func
    umath.seterrobj(pyvals)
    return previous
def geterrcall():
    """
    Return the current callback used on floating-point errors.

    When the treatment of a floating-point error ("divide", "over",
    "under" or "invalid") is set to 'call' or 'log', the callable or
    log instance previously registered through `seterrcall` is the one
    invoked.  This function retrieves that handler.

    Returns
    -------
    errobj : callable, log instance or None
        The currently registered handler, or ``None`` when nothing has
        been set through `seterrcall`.

    See Also
    --------
    seterrcall, seterr, geterr

    Notes
    -----
    For complete documentation of the types of floating-point
    exceptions and treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterrcall()  # we did not yet set a handler, returns None
    >>> oldsettings = np.seterr(all='call')
    >>> def err_handler(type, flag):
    ...     print "Floating point error (%s), with flag %s" % (type, flag)
    >>> oldhandler = np.seterrcall(err_handler)
    >>> cur_handler = np.geterrcall()
    >>> cur_handler is err_handler
    True
    """
    # The error-state object is [bufsize, errmask, callback]; the
    # registered callback lives in slot 2.
    errobj = umath.geterrobj()
    return errobj[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Statements executed inside a ``with errstate(...)`` block run under
    the requested error-handling policy; on exit the previous policy is
    restored.  Entering the context calls `seterr` (and `seterrcall`
    when a ``call`` keyword is supplied), and exiting undoes both.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments naming the floating-point exception classes,
        each mapped to one of {'ignore', 'warn', 'raise', 'call',
        'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    The ``with`` statement requires Python 2.5 with
    ``from __future__ import with_statement``; on earlier interpreters
    this class cannot be used as a context manager.

    For complete documentation of the types of floating-point
    exceptions and treatment options, see `seterr`.
    """
    # NOTE(review): doctest examples are omitted here on purpose; they
    # would need ``from __future__ import with_statement`` to run on
    # Python 2.5 and would fail when executed directly.

    def __init__(self, **kwargs):
        # 'call' rides along in kwargs but is handled separately via
        # seterrcall; _Unspecified marks its absence.
        self.call = kwargs.pop('call', _Unspecified)
        self.kwargs = kwargs

    def __enter__(self):
        # Remember the prior state so __exit__ can restore it.
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        # Restore unconditionally, even when an exception is in flight.
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
def _setdef():
    # Reset the shared ufunc error-state object to its defaults.
    # Layout is [buffer size, error mask, python callback]; a callback
    # of None means "no handler registered".
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
    umath.seterrobj(defval)
# set the default values
_setdef()
# Floating-point special values exported under all of their historical
# NumPy spellings.
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
# Canonical boolean scalars of NumPy's bool_ type.
False_ = bool_(False)
True_ = bool_(True)
import fromnumeric
from fromnumeric import *
# Re-export fromnumeric's public names through this module's __all__.
extend_all(fromnumeric)
| {
"repo_name": "astrofrog/numpy",
"path": "numpy/core/numeric.py",
"copies": "1",
"size": "74658",
"license": "bsd-3-clause",
"hash": 8354031225215597000,
"line_mean": 27.8366164542,
"line_max": 86,
"alpha_frac": 0.5702001125,
"autogenerated": false,
"ratio": 3.5612478534630796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9620308022770103,
"avg_score": 0.0022279886385952307,
"num_lines": 2589
} |
# Public API of this module.  extend_all() below additionally appends
# the public names of umath, numerictypes and fromnumeric.
__all__ = ['newaxis', 'ndarray', 'flatiter', 'newiter', 'nested_iters', 'ufunc',
           'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
           'dtype', 'fromstring', 'fromfile', 'frombuffer',
           'int_asbuffer', 'where', 'argwhere',
           'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops',
           'can_cast', 'promote_types', 'min_scalar_type', 'result_type',
           'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
           'isfortran', 'empty_like', 'zeros_like',
           'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot',
           'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
           'array2string', 'get_printoptions', 'set_printoptions',
           'array_repr', 'array_str', 'set_string_function',
           'little_endian', 'require',
           'fromiter', 'array_equal', 'array_equiv',
           'indices', 'fromfunction',
           'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
           'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
           'seterr', 'geterr', 'setbufsize', 'getbufsize',
           'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
           'Inf', 'inf', 'infty', 'Infinity',
           'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
           'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
           'ComplexWarning']
import sys
import warnings
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
# The raw-buffer helpers exist only on Python 2.
if sys.version_info[0] < 3:
    __all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
    """
    Warning raised when a complex dtype is cast to a real dtype.

    Such a cast discards the imaginary part of every element, which is
    rarely what the user intended, so NumPy warns instead of silently
    dropping data.
    """
    pass
# Named alias for the ~ (bitwise inversion) ufunc.
bitwise_not = invert
# Out-of-bounds index handling modes for take() and friends.
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
# Implementation limits and capability flags from the C core.
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
# Core types re-exported from the C extension module.
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
newiter = multiarray.newiter
nested_iters = multiarray.nested_iters
broadcast = multiarray.broadcast
dtype = multiarray.dtype
# All ufuncs share a single C-level type; obtain it from any instance.
ufunc = type(sin)
# originally from Fernando Perez's IPython
def zeros_like(a, dtype=None, order='K'):
    """
    Return a zero-filled array with the same shape and type as `a`.

    With default parameters this is equivalent to ``a.copy().fill(0)``,
    but without copying `a`'s data first.

    Parameters
    ----------
    a : array_like
        Prototype; its shape and data-type define those of the result.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C', 'F', 'A', or 'K'}, optional
        Memory layout of the result.  'C' and 'F' force C or Fortran
        order, 'A' means 'F' when `a` is Fortran contiguous and 'C'
        otherwise, and 'K' (default) matches the layout of `a` as
        closely as possible.

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.

    Examples
    --------
    >>> np.zeros_like(np.arange(6).reshape((2, 3)))
    array([[0, 0, 0],
           [0, 0, 0]])
    >>> np.zeros_like(np.arange(3, dtype=np.float))
    array([ 0.,  0.,  0.])
    """
    # Allocate uninitialized storage, then blank it in place.
    out = empty_like(a, dtype=dtype, order=order)
    out.fill(0)
    return out
# end Fernando's utilities
def extend_all(module):
    """Append to __all__ every public name of *module* not already listed.

    Uses ``module.__all__`` when the module defines it; otherwise every
    attribute whose name does not start with an underscore.
    """
    existing = set(__all__)
    try:
        exported = getattr(module, '__all__')
    except AttributeError:
        exported = [name for name in module.__dict__.keys()
                    if not name.startswith('_')]
    for name in exported:
        if name not in existing:
            __all__.append(name)
# Export the public names of the C extension modules as well.
extend_all(umath)
extend_all(numerictypes)
# Indexing with newaxis inserts a new length-one axis.
newaxis = None
# Array constructors implemented in C.
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
count_nonzero = multiarray.count_nonzero
empty = multiarray.empty
empty_like = multiarray.empty_like
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
# Python 2 only: raw-buffer helpers that were removed in Python 3.
if sys.version_info[0] < 3:
    newbuffer = multiarray.newbuffer
    getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
# Selection, rearrangement and type-promotion primitives.
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
promote_types = multiarray.promote_types
min_scalar_type = multiarray.min_scalar_type
result_type = multiarray.result_type
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
einsum = multiarray.einsum
def asarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray.

    Parameters
    ----------
    a : array_like
        Anything convertible to an array: scalars, (nested) lists and
        tuples, or existing ndarrays.
    dtype : data-type, optional
        Data type of the result; inferred from `a` when omitted.
    order : {'C', 'F'}, optional
        Row-major ('C', the default) or column-major ('F') memory
        representation.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed when the
        input already is a matching ndarray; ndarray subclasses are
        converted to base-class ndarrays.

    See Also
    --------
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    >>> np.asarray([1, 2])
    array([1, 2])

    Existing arrays are not copied:

    >>> a = np.array([1, 2])
    >>> np.asarray(a) is a
    True
    """
    # copy=False lets already-suitable inputs pass straight through;
    # subok is left at its default of False, so subclasses (e.g.
    # matrix) come back as plain ndarrays.
    return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray, but pass ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Anything convertible to an array: scalars, (nested) lists and
        tuples, or existing ndarrays and subclasses thereof.
    dtype : data-type, optional
        Data type of the result; inferred from `a` when omitted.
    order : {'C', 'F'}, optional
        Row-major ('C', the default) or column-major ('F') memory
        representation.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`.  When `a` already is an ndarray or
        a subclass, it is returned as-is with no copy.

    See Also
    --------
    asarray : Similar function which always returns ndarrays.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.

    Examples
    --------
    >>> np.asanyarray([1, 2])
    array([1, 2])

    Instances of `ndarray` subclasses are passed through as-is:

    >>> a = np.matrix([1, 2])
    >>> np.asanyarray(a) is a
    True
    """
    # Identical to asarray() except subok=True, which preserves the
    # class of ndarray subclasses instead of downcasting them.
    return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of the returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of the same shape and content as `a`, with
        type `dtype` if specified.  At least 1-dimensional.

    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.ascontiguousarray(x).flags['C_CONTIGUOUS']
    True
    """
    # ndmin=1 promotes scalars to 1-D so the result always has a
    # meaningful contiguity flag; copy=False avoids work when `a`
    # already satisfies the request.
    return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
    """
    Return an array laid out in Fortran (column-major) order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.  At least
        1-dimensional.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    asanyarray : Convert input to an ndarray with either row or
                 column-major memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> y = np.asfortranarray(np.arange(6).reshape(2,3))
    >>> y.flags['F_CONTIGUOUS']
    True
    """
    # Mirror image of ascontiguousarray(): same promotion to >= 1-D,
    # but requesting column-major layout.
    return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    This function is useful to be sure that an array with the correct
    flags is returned for passing to compiled code (perhaps through
    ctypes).

    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying
        array.
    dtype : data-type, optional
        The required data-type.  When omitted the input's type is kept.
    requirements : str or list of str, optional
        The requirements list can be any of the following
        (case-insensitive):

        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A') - ensure a data-type aligned array
        * 'WRITEABLE' ('W') - ensure a writable array
        * 'OWNDATA' ('O') - ensure an array that owns its own data
        * 'ENSUREARRAY' ('E') - ensure a base-class ndarray (no subclass)

    Returns
    -------
    out : ndarray
        Array satisfying the requirements; a copy is made only when
        necessary.

    See Also
    --------
    asarray : Convert input to an ndarray.
    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    ndarray.flags : Information about the memory layout of the array.

    Notes
    -----
    The returned array will be guaranteed to have the listed
    requirements by making a copy if needed.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    >>> y.flags['F_CONTIGUOUS']
    True
    """
    if requirements is None:
        requirements = []
    else:
        requirements = [x.upper() for x in requirements]
    if not requirements:
        return asanyarray(a, dtype=dtype)
    if 'ENSUREARRAY' in requirements or 'E' in requirements:
        subok = False
        # BUG FIX: 'E'/'ENSUREARRAY' is not an ndarray flag key; it must
        # be removed before the flag-checking loop below, which would
        # otherwise raise KeyError on arr.flags['E'].
        requirements = [x for x in requirements
                        if x not in ('E', 'ENSUREARRAY')]
    else:
        subok = True
    arr = array(a, dtype=dtype, copy=False, subok=subok)
    # Pick the memory order used when a corrective copy is needed.
    copychar = 'A'
    if 'FORTRAN' in requirements or \
       'F_CONTIGUOUS' in requirements or \
       'F' in requirements:
        copychar = 'F'
    elif 'CONTIGUOUS' in requirements or \
         'C_CONTIGUOUS' in requirements or \
         'C' in requirements:
        copychar = 'C'
    # One copy in the requested order satisfies every flag at once, so
    # bail out of the scan after the first unmet requirement.
    for prop in requirements:
        if not arr.flags[prop]:
            arr = arr.copy(copychar)
            break
    return arr
def isfortran(a):
    """
    Return True if `a` is Fortran-ordered in memory and has more than
    one dimension.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    bool
        True only when the array is Fortran contiguous but *not* also
        C contiguous; 1-D arrays therefore always evaluate to False.

    Examples
    --------
    >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
    >>> np.isfortran(b)
    True
    >>> np.isfortran(np.array([[1, 2, 3], [4, 5, 6]], order='C'))
    False

    The transpose of a C-ordered array is Fortran-ordered:

    >>> np.isfortran(np.array([[1, 2, 3], [4, 5, 6]], order='C').T)
    True

    1-D arrays always evaluate as False:

    >>> np.isfortran(np.array([1, 2]))
    False
    """
    # "fnc" == Fortran-contiguous and Not C-contiguous.
    flags = a.flags
    return flags.fnc
def argwhere(a):
    """
    Find the indices of array elements that are non-zero, grouped by
    element.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    index_array : ndarray
        Indices of elements that are non-zero, one row per element.

    See Also
    --------
    where, nonzero

    Notes
    -----
    ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.

    The output of ``argwhere`` is not suitable for indexing arrays.
    For this purpose use ``where(a)`` instead.

    Examples
    --------
    >>> np.argwhere(np.arange(6).reshape(2,3) > 1)
    array([[0, 2],
           [1, 0],
           [1, 1],
           [1, 2]])
    """
    # nonzero() yields one index array per axis; transposing that
    # tuple regroups the indices per element.
    per_axis_indices = asanyarray(a).nonzero()
    return transpose(per_axis_indices)
def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of `a`.

    Equivalent to ``a.ravel().nonzero()[0]``.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    res : ndarray
        Indices of the non-zero elements of ``a.ravel()``.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])
    """
    # ravel() gives a 1-D view, so nonzero() returns a 1-tuple whose
    # only entry is the wanted index array.
    flat = a.ravel()
    return flat.nonzero()[0]
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a, v, mode='valid', old_behavior=False):
    """
    Cross-correlation of two 1-dimensional sequences.

    Computes the correlation as generally defined in signal-processing
    texts::

        z[k] = sum_n a[n] * conj(v[n+k])

    with `a` and `v` zero-padded where necessary and ``conj`` being the
    complex conjugate.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `convolve` docstring.  Note that the default here
        is 'valid', unlike `convolve`, which uses 'full'.
    old_behavior : bool
        When True, use the deprecated Numeric semantics instead:
        ``correlate(a, v) == correlate(v, a)`` and no conjugation for
        complex arrays.

    See Also
    --------
    convolve : Discrete, linear convolution of two one-dimensional sequences.

    Examples
    --------
    >>> np.correlate([1, 2, 3], [0, 1, 0.5])
    array([ 3.5])
    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
    array([ 2. ,  3.5,  3. ])
    """
    mode = _mode_from_name(mode)
    if not old_behavior:
        # Conventional definition: inputs never swapped, v conjugated.
        return multiarray.correlate2(a, v, mode)
    # The old behavior should be made available under a different name,
    # see thread
    # http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630
    warnings.warn("""
The old behavior of correlate was deprecated for 1.4.0, and will be completely removed
for NumPy 2.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
                  DeprecationWarning)
    return multiarray.correlate(a, v, mode)
def convolve(a, v, mode='full'):
    """
    Returns the discrete, linear convolution of two one-dimensional
    sequences.

    The convolution operator is often seen in signal processing, where
    it models the effect of a linear time-invariant system on a signal
    [1]_.  In probability theory, the sum of two independent random
    variables is distributed according to the convolution of their
    individual distributions.

    Parameters
    ----------
    a : (N,) array_like
        First one-dimensional input array.
    v : (M,) array_like
        Second one-dimensional input array.
    mode : {'full', 'valid', 'same'}, optional
        'full' (default) returns the convolution at each point of
        overlap, shape ``(N+M-1,)``, with boundary effects at the ends;
        'same' returns length ``max(M, N)`` with boundary effects still
        visible; 'valid' returns length ``max(M, N) - min(M, N) + 1``,
        only where the signals overlap completely.

    Returns
    -------
    out : ndarray
        Discrete, linear convolution of `a` and `v`.

    Raises
    ------
    ValueError
        When either input sequence is empty.

    See Also
    --------
    scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
                               Transform.
    scipy.linalg.toeplitz : Used to construct the convolution operator.

    References
    ----------
    .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.

    Examples
    --------
    >>> np.convolve([1, 2, 3], [0, 1, 0.5])
    array([ 0. ,  1. ,  2.5,  4. ,  1.5])
    >>> np.convolve([1,2,3],[0,1,0.5], 'same')
    array([ 1. ,  2.5,  4. ])
    >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
    array([ 2.5])
    """
    a = array(a, ndmin=1)
    v = array(v, ndmin=1)
    # Keep the longer sequence in `a`; convolution is commutative.
    if len(v) > len(a):
        a, v = v, a
    if len(a) == 0:
        raise ValueError('a cannot be empty')
    if len(v) == 0:
        raise ValueError('v cannot be empty')
    # Convolution equals correlation with the second operand reversed.
    return multiarray.correlate(a, v[::-1], _mode_from_name(mode))
def outer(a, b):
    """
    Compute the outer product of two vectors.

    Given ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``, the
    outer product [1]_ is the ``(M+1) x (N+1)`` matrix whose entry
    ``(i, j)`` is ``a[i] * b[j]``.

    Parameters
    ----------
    a, b : array_like, shape (M,), (N,)
        First and second input vectors.  Inputs are flattened if they
        are not already 1-dimensional.

    Returns
    -------
    out : ndarray, shape (M, N)
        ``out[i, j] = a[i] * b[j]``

    See also
    --------
    inner, einsum

    References
    ----------
    .. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
             ed., Baltimore, MD, Johns Hopkins University Press, 1996,
             pg. 8.

    Examples
    --------
    >>> np.outer([1, 2], [3, 4])
    array([[3, 4],
           [6, 8]])
    """
    av = asarray(a).ravel()
    bv = asarray(b).ravel()
    # Broadcasting an (M, 1) column against a (1, N) row yields (M, N).
    return av[:, newaxis] * bv[newaxis, :]
# try to import blas optimized dot if available
try:
    # importing this changes the dot function for basic 4 types
    # to blas-optimized versions.
    from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
    # No BLAS available: fall back to the C-core implementations.
    # docstrings are in add_newdocs.py
    inner = multiarray.inner
    dot = multiarray.dot
    def vdot(a, b):
        # Flatten both operands and conjugate the first, per the
        # definition of the complex vector dot product.
        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
    def alterdot():
        # Without BLAS, switching implementations is a no-op.
        pass
    def restoredot():
        # Without BLAS, there is nothing to restore.
        pass
def tensordot(a, b, axes=2):
    """
    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors ``a`` and ``b``, sum the products of their
    elements over the axes specified by ``axes``, which may be

    * an integer_like scalar ``N``: sum over the last ``N`` dimensions
      of `a` and the first ``N`` dimensions of `b`, in order; or
    * a pair ``(a_axes, b_axes)`` of axis sequences (or single axes),
      the first applying to `a` and the second to `b`.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : variable type
        Axes specification as described above.

    Returns
    -------
    out : ndarray
        The tensor dot product of the inputs.

    Raises
    ------
    ValueError
        When the numbers of summed axes differ or any paired axes of
        `a` and `b` have mismatched lengths.

    See Also
    --------
    dot, einsum

    Notes
    -----
    When there is more than one axis to sum over - and they are not the
    last (first) axes of ``a`` (``b``) - the argument ``axes`` should
    consist of two sequences of the same length, with the first axis to
    sum over given first in both sequences, the second axis second, and
    so forth.

    Examples
    --------
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    """
    try:
        iter(axes)
    except TypeError:
        # Scalar form: last `axes` dims of a against first `axes` of b.
        # (Was a bare except; iter() raises TypeError on non-iterables.)
        axes_a = list(range(-axes, 0))
        axes_b = list(range(0, axes))
    else:
        axes_a, axes_b = axes
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1
    a, b = asarray(a), asarray(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)
    # Verify that each pair of summed axes has matching length, and
    # normalize negative axis numbers while we are at it.
    equal = True
    if na != nb:
        equal = False
    else:
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        # Python-3-compatible raise syntax (was ``raise ValueError, msg``).
        raise ValueError("shape-mismatch for sum")
    # Move the axes to sum over to the end of "a"
    # and to the front of "b", reducing the problem to a single
    # matrix product.
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]
    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements shifted beyond the last position re-enter at the first.

    Parameters
    ----------
    a : array_like
        Input array.
    shift : int
        The number of places by which elements are shifted.
    axis : int, optional
        The axis along which elements are shifted.  By default the
        array is flattened before shifting, after which the original
        shape is restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    See Also
    --------
    rollaxis : Roll the specified axis backwards, until it lies in a
               given position.

    Examples
    --------
    >>> np.roll(np.arange(10), 2)
    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
    >>> np.roll(np.arange(10).reshape(2,5), 1, axis=1)
    array([[4, 0, 1, 2, 3],
           [9, 5, 6, 7, 8]])
    """
    a = asanyarray(a)
    flattened = axis is None
    # Size of the "ring" being rotated.
    if flattened:
        n = a.size
    else:
        n = a.shape[axis]
    shift %= n
    # Gathering at indices [n-shift .. n-1, 0 .. n-shift-1] performs
    # the rotation in a single take().
    indexes = concatenate((arange(n - shift, n), arange(n - shift)))
    res = a.take(indexes, axis)
    if flattened:
        res = res.reshape(a.shape)
    return res
def rollaxis(a, axis, start=0):
    """
    Roll the specified axis backwards, until it lies in a given position.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        The axis to roll backwards.  The positions of the other axes do
        not change relative to one another.
    start : int, optional
        The axis is rolled until it lies before this position.  The
        default, 0, results in a "complete" roll.

    Returns
    -------
    res : ndarray
        Output array (a transposed view of `a`).

    Raises
    ------
    ValueError
        When `axis` or `start` (after normalizing negative values) is
        out of range.

    See Also
    --------
    roll : Roll the elements of an array by a number of positions along a
           given axis.

    Examples
    --------
    >>> a = np.ones((3,4,5,6))
    >>> np.rollaxis(a, 3, 1).shape
    (3, 6, 4, 5)
    >>> np.rollaxis(a, 2).shape
    (5, 3, 4, 6)
    >>> np.rollaxis(a, 1, 4).shape
    (3, 5, 6, 4)
    """
    n = a.ndim
    # Normalize negative indices to the usual non-negative form.
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    if not (0 <= axis < n):
        # Python-3-compatible raise syntax (was ``raise ValueError, ...``).
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n + 1):
        raise ValueError(msg % ('start', start, n + 1))
    if axis < start:
        # The axis is removed from in front of `start`, so compensate.
        start -= 1
    if axis == start:
        return a
    # list() is required for Python 3, where range() has no .remove().
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
    """Compatibility shim: bring `axis` to the front (scipy imports this)."""
    return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """
    Return the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
    to both `a` and `b`.  If `a` and `b` are arrays of vectors, the vectors
    are defined by the last axis of `a` and `b` by default, and these axes
    can have dimensions 2 or 3.  Where the dimension of either `a` or `b` is
    2, the third component of the input vector is assumed to be zero and the
    cross product calculated accordingly.  In cases where both input vectors
    have dimension 2, the z-component of the cross product is returned.

    Parameters
    ----------
    a : array_like
        Components of the first vector(s).
    b : array_like
        Components of the second vector(s).
    axisa : int, optional
        Axis of `a` that defines the vector(s).  By default, the last axis.
    axisb : int, optional
        Axis of `b` that defines the vector(s).  By default, the last axis.
    axisc : int, optional
        Axis of `c` containing the cross product vector(s).  By default, the
        last axis.
    axis : int, optional
        If defined, the axis of `a`, `b` and `c` that defines the vector(s)
        and cross product(s).  Overrides `axisa`, `axisb` and `axisc`.

    Returns
    -------
    c : ndarray
        Vector cross product(s).

    Raises
    ------
    ValueError
        When the dimension of the vector(s) in `a` and/or `b` does not
        equal 2 or 3.

    See Also
    --------
    inner : Inner product
    outer : Outer product.
    ix_ : Construct index arrays.

    Examples
    --------
    Vector cross-product.

    >>> x = [1, 2, 3]
    >>> y = [4, 5, 6]
    >>> np.cross(x, y)
    array([-3, 6, -3])

    One vector with dimension 2.

    >>> x = [1, 2]
    >>> y = [4, 5, 6]
    >>> np.cross(x, y)
    array([12, -6, -3])

    Equivalently:

    >>> x = [1, 2, 0]
    >>> y = [4, 5, 6]
    >>> np.cross(x, y)
    array([12, -6, -3])

    Both vectors with dimension 2.

    >>> x = [1,2]
    >>> y = [4,5]
    >>> np.cross(x, y)
    -3

    Multiple vector cross-products.  Note that the direction of the cross
    product vector is defined by the `right-hand rule`.

    >>> x = np.array([[1,2,3], [4,5,6]])
    >>> y = np.array([[4,5,6], [1,2,3]])
    >>> np.cross(x, y)
    array([[-3, 6, -3],
           [ 3, -6, 3]])

    The orientation of `c` can be changed using the `axisc` keyword.

    >>> np.cross(x, y, axisc=0)
    array([[-3, 3],
           [ 6, -6],
           [-3, 3]])

    Change the vector definition of `x` and `y` using `axisa` and `axisb`.

    >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
    >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
    >>> np.cross(x, y)
    array([[ -6, 12, -6],
           [ 0, 0, 0],
           [ 6, -12, 6]])
    >>> np.cross(x, y, axisa=0, axisb=0)
    array([[-24, 48, -24],
           [-30, 60, -30],
           [-36, 72, -36]])
    """
    if axis is not None:
        # A single `axis` argument overrides all three per-array axes.
        axisa, axisb, axisc = (axis,)*3
    # Move the vector-component axes to the front so components can be
    # addressed uniformly as a[0], a[1], a[2] below.
    a = asarray(a).swapaxes(axisa, 0)
    b = asarray(b).swapaxes(axisb, 0)
    msg = "incompatible dimensions for cross product\n"\
          "(dimension must be 2 or 3)"
    if (a.shape[0] not in [2, 3]) or (b.shape[0] not in [2, 3]):
        raise ValueError(msg)
    if a.shape[0] == 2:
        if (b.shape[0] == 2):
            # 2-D x 2-D: only the z-component is non-zero, so return it
            # as a scalar (per vector pair) rather than a full vector.
            cp = a[0]*b[1] - a[1]*b[0]
            if cp.ndim == 0:
                return cp
            else:
                # Move the component axis back to where the caller asked.
                return cp.swapaxes(0, axisc)
        else:
            # 2-D x 3-D: treat a as (a0, a1, 0).
            x = a[1]*b[2]
            y = -a[0]*b[2]
            z = a[0]*b[1] - a[1]*b[0]
    elif a.shape[0] == 3:
        if (b.shape[0] == 3):
            # 3-D x 3-D: the standard cross product formula.
            x = a[1]*b[2] - a[2]*b[1]
            y = a[2]*b[0] - a[0]*b[2]
            z = a[0]*b[1] - a[1]*b[0]
        else:
            # 3-D x 2-D: treat b as (b0, b1, 0).
            x = -a[2]*b[1]
            y = a[2]*b[0]
            z = a[0]*b[1] - a[1]*b[0]
    cp = array([x, y, z])
    if cp.ndim == 1:
        # Single vector result: nothing to swap back.
        return cp
    else:
        return cp.swapaxes(0, axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
# Array scalar types whose repr round-trips without an explicit dtype, so
# array_repr() can omit the ``dtype=`` annotation for arrays of these types.
_typelessdata = [int_, float_, complex_]
# On platforms where the C int / long long coincide with the Python int
# type, their numpy counterparts are typeless as well.
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'
    """
    # Render the data part.  A zero-size array whose shape is not exactly
    # (0,) is shown as "[], shape=..." since its contents are uninformative.
    if arr.size > 0 or arr.shape == (0,):
        body = array2string(arr, max_line_width, precision, suppress_small,
                            ', ', "array(")
    else:
        body = "[], shape=%s" % (repr(arr.shape),)

    if arr.__class__ is ndarray:
        class_name = "array"
    else:
        # Subclasses print under their own name (e.g. 'MaskedArray(...)').
        class_name = arr.__class__.__name__

    # dtypes that round-trip from the repr alone need no dtype= annotation.
    if arr.dtype.type in _typelessdata and arr.size:
        return "%s(%s)" % (class_name, body)

    type_name = arr.dtype.name
    lead = ''
    if issubclass(arr.dtype.type, flexible):
        # Flexible dtypes (strings, records) need their full dtype spelled
        # out, placed on a continuation line aligned under "array(".
        if arr.dtype.names:
            type_name = "%s" % str(arr.dtype)
        else:
            type_name = "'%s'" % str(arr.dtype)
        lead = '\n' + ' ' * len("array(")
    return "%s(%s, %sdtype=%s)" % (class_name, body, lead, type_name)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data in the array is returned as a single string.  This function
    is similar to `array_repr`, the difference being that `array_repr`
    also returns information on the kind of array and its data type.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8).
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    # Only the data is shown: space separator, no "array(" prefix, and the
    # str() style for scalars.
    rendered = array2string(a, max_line_width, precision, suppress_small,
                            ' ', "", str)
    return rendered
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function used to pretty print arrays; it should accept a single
        array argument and return a string representation.  If None, the
        default NumPy formatter is restored.
    repr : bool, optional
        If True (default), the function for pretty printing (``__repr__``)
        is set; if False, the function that returns the default string
        representation (``__str__``) is set.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> def pprint(arr):
    ...     return 'HA! - What are you going to do now?'
    ...
    >>> np.set_string_function(pprint)
    >>> a = np.arange(10)
    >>> a
    HA! - What are you going to do now?
    >>> np.set_string_function(None)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    if f is not None:
        return multiarray.set_string_function(f, repr)
    # Resetting: reinstall the default formatter for the requested slot.
    if repr:
        return multiarray.set_string_function(array_repr, 1)
    return multiarray.set_string_function(array_str, 0)
# Install the default array formatters: str() shows just the data,
# repr() shows the full "array(...)" form.
set_string_function(array_str, 0)
set_string_function(array_repr, 1)

# True on little-endian platforms; consulted when interpreting byte order.
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
    """
    Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : dtype, optional
        Data type of the result.

    Returns
    -------
    grid : ndarray
        The array of grid indices,
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.

    See Also
    --------
    mgrid, meshgrid

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]])
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]])
    """
    dimensions = tuple(dimensions)
    ndim = len(dimensions)
    if ndim == 0:
        return array([], dtype=dtype)
    grid = empty((ndim,) + dimensions, dtype=dtype)
    for axis, length in enumerate(dimensions):
        # A 1-D ramp 0..length-1, reshaped so it varies only along `axis`.
        ramp = arange(length, dtype=dtype)
        ramp.shape = (1,) * axis + (length,) + (1,) * (ndim - axis - 1)
        # Adding the ramp to a zero array of the axis-collapsed shape
        # broadcasts the index values across grid[axis] in place.
        filler = zeros(dimensions[:axis] + (1,) + dimensions[axis + 1:], dtype)
        add(ramp, filler, grid[axis])
    return grid
def fromfunction(function, shape, **kwargs):
    """
    Construct an array by executing a function over each coordinate.

    The resulting array therefore has a value ``fn(x, y, z)`` at
    coordinate ``(x, y, z)``.

    Parameters
    ----------
    function : callable
        Called with N array parameters, one per axis, each giving the
        coordinate values varying along that axis.  Must be capable of
        operating on arrays.
    shape : (N,) tuple of ints
        Shape of the output array, which also determines the shape of
        the coordinate arrays passed to `function`.
    dtype : data-type, optional
        Data-type of the coordinate arrays passed to `function`.
        By default, `dtype` is float.

    Returns
    -------
    out : any
        The result of the call to `function`, passed back directly; its
        type and shape are completely determined by `function`.

    See Also
    --------
    indices, meshgrid

    Notes
    -----
    Keywords other than `shape` and `dtype` are passed to `function`.

    Examples
    --------
    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4]])
    """
    coord_dtype = kwargs.pop('dtype', float)
    # One coordinate array per axis, each giving the index values along it.
    coords = indices(shape, dtype=coord_dtype)
    # Any remaining keyword arguments are forwarded to the user function.
    return function(*coords, **kwargs)
def isscalar(num):
    """
    Returns True if the type of `num` is a scalar type.

    Parameters
    ----------
    num : any
        Input argument, can be of any type and shape.

    Returns
    -------
    val : bool
        True if `num` is a scalar type, False if it is not.

    Examples
    --------
    >>> np.isscalar(3.1)
    True
    >>> np.isscalar([3.1])
    False
    >>> np.isscalar(False)
    True
    """
    # numpy array scalars are always scalar; for everything else fall back
    # to the registry of Python scalar types.
    return isinstance(num, generic) or type(num) in ScalarType
# Hex-digit -> 4-bit binary-string lookup, used by binary_repr() to expand
# the output of hex() one character at a time.  The 'L' entry swallows the
# long-integer suffix that Python 2's hex() appends.
_lkup = {
    '0':'0000',
    '1':'0001',
    '2':'0010',
    '3':'0011',
    '4':'0100',
    '5':'0101',
    '6':'0110',
    '7':'0111',
    '8':'1000',
    '9':'1001',
    'a':'1010',
    'b':'1011',
    'c':'1100',
    'd':'1101',
    'e':'1110',
    'f':'1111',
    'A':'1010',
    'B':'1011',
    'C':'1100',
    'D':'1101',
    'E':'1110',
    'F':'1111',
    'L':''}
def binary_repr(num, width=None):
    """
    Return the binary representation of the input number as a string.

    For negative numbers, if width is not given, a minus sign is added to
    the front.  If width is given, the two's complement of the number is
    returned, with respect to that width.

    Parameters
    ----------
    num : int
        Only an integer decimal number can be used.
    width : int, optional
        The length of the returned string if `num` is positive, the length
        of the two's complement if `num` is negative.

    Returns
    -------
    bin : str
        Binary representation of `num` or two's complement of `num`.

    See Also
    --------
    base_repr : Return a string representation of a number in the given
        base system.

    Examples
    --------
    >>> np.binary_repr(3)
    '11'
    >>> np.binary_repr(-3)
    '-11'
    >>> np.binary_repr(3, width=4)
    '0011'
    >>> np.binary_repr(-3, width=4)
    '1101'
    """
    sign = ''
    if num < 0:
        if width is None:
            # No width: print the magnitude with a leading minus sign.
            sign = '-'
            num = -num
        else:
            # Width given: represent the value as its two's complement.
            num = 2**width + num
    elif num == 0:
        return '0' * (width or 1)
    # Expand each hex digit of the number into its 4-bit binary form, then
    # drop the leading zeros that the digit-wise expansion introduces.
    hexdigits = hex(num)[2:]
    bits = ''.join([_lkup[ch] for ch in hexdigits]).lstrip('0')
    if width is not None:
        bits = bits.zfill(width)
    return sign + bits
def base_repr(number, base=2, padding=0):
    """
    Return a string representation of a number in the given base system.

    Parameters
    ----------
    number : int
        The value to convert.  Negative values are prefixed with a minus
        sign.
    base : int, optional
        Convert `number` to the `base` number system.  The valid range is
        2-36, the default value is 2.
    padding : int, optional
        Number of zeros padded on the left.  Default is 0 (no padding).

    Returns
    -------
    out : str
        String representation of `number` in `base` system.

    Raises
    ------
    ValueError
        If `base` lies outside the supported range 2-36.

    See Also
    --------
    binary_repr : Faster version of `base_repr` for base 2.

    Examples
    --------
    >>> np.base_repr(5)
    '101'
    >>> np.base_repr(6, 5)
    '11'
    >>> np.base_repr(7, base=5, padding=3)
    '00012'
    >>> np.base_repr(10, base=16)
    'A'
    >>> np.base_repr(32, base=16)
    '20'
    """
    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if base > len(digits):
        raise ValueError("Bases greater than 36 not handled in base_repr.")
    # Guard the lower bound too: base 1 would make the division loop below
    # run forever (num //= 1 never shrinks), and base <= 0 is meaningless
    # as a positional number system.
    if base < 2:
        raise ValueError("Bases less than 2 not handled in base_repr.")
    num = abs(number)
    res = []
    while num:
        res.append(digits[num % base])
        num //= base
    if padding:
        res.append('0' * padding)
    if number < 0:
        res.append('-')
    # `res or '0'` also covers number == 0, which leaves res empty.
    return ''.join(reversed(res or '0'))
from cPickle import load, loads
# Keep private aliases to the pickle loader and the builtin open(), since
# the wrapper `load` defined below rebinds the public name.
_cload = load
_file = open
def load(file):
    """
    Wrapper around cPickle.load which accepts either a file-like object or
    a filename.

    Note that the NumPy binary format is not based on pickle/cPickle anymore.
    For details on the preferred way of loading and saving files, see `load`
    and `save`.

    Parameters
    ----------
    file : file-like object or str
        An open file to read the pickle from, or the name of such a file.

    Returns
    -------
    result : any
        The unpickled object.

    See Also
    --------
    load, save
    """
    if isinstance(file, type("")):
        file = _file(file, "rb")
        # We opened this handle ourselves, so close it again instead of
        # leaking it to the garbage collector.
        try:
            return _cload(file)
        finally:
            file.close()
    return _cload(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with ones.

    Please refer to the documentation for `zeros` for further details.

    See Also
    --------
    zeros, ones_like

    Examples
    --------
    >>> np.ones(5)
    array([ 1.,  1.,  1.,  1.,  1.])
    >>> np.ones((5,), dtype=np.int)
    array([1, 1, 1, 1, 1])
    >>> np.ones((2, 2))
    array([[ 1.,  1.],
           [ 1.,  1.]])
    """
    out = empty(shape, dtype, order)
    try:
        # fill() is the fastest way to set every element to a scalar.
        out.fill(1)
    except TypeError:
        # Structured dtypes reject a plain scalar; fill with a matching
        # nested tuple of ones instead.
        out.fill(_maketup(dtype, 1))
    return out
def identity(n, dtype=None):
    """
    Return the identity array.

    The identity array is a square array with ones on the main diagonal.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in `n` x `n` output.
    dtype : data-type, optional
        Data-type of the output.  Defaults to ``float``.

    Returns
    -------
    out : ndarray
        `n` x `n` array with its main diagonal set to one,
        and all other elements 0.

    Examples
    --------
    >>> np.identity(3)
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    out = zeros((n, n), dtype=dtype)
    # Every (n+1)-th element of the flat view lies on the main diagonal.
    out.flat[::n + 1] = 1
    return out
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    The tolerance values are positive, typically very small numbers.  The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).

    Returns
    -------
    y : bool
        True if the two arrays are equal within the given tolerance;
        False otherwise.  If either array contains NaN, then False is
        returned.

    See Also
    --------
    all, any, alltrue, sometrue

    Notes
    -----
    Returns True when, element-wise::

        absolute(a - b) <= (atol + rtol * absolute(b))

    The equation is not symmetric in `a` and `b`, so ``allclose(a, b)``
    may differ from ``allclose(b, a)`` in rare cases.

    Examples
    --------
    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
    False
    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    True
    >>> np.allclose([1.0, np.nan], [1.0, np.nan])
    False
    """
    x = array(a, copy=False)
    y = array(b, copy=False)

    def _within_tol(u, v):
        # The asymmetric comparison documented above.
        return all(less_equal(absolute(u - v), atol + rtol * absolute(v)))

    # Infinities must match exactly in position and sign; the tolerance
    # formula is meaningless for them (inf - inf is NaN).
    xinf = isinf(x)
    if not all(xinf == isinf(y)):
        return False
    if not any(xinf):
        return _within_tol(x, y)
    if not all(x[xinf] == y[xinf]):
        return False
    # Compare only the finite entries against the tolerance.
    return _within_tol(x[~xinf], y[~xinf])
def array_equal(a1, a2):
    """
    True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    b : bool
        Returns True if the arrays are equal.

    See Also
    --------
    allclose: Returns True if two arrays are element-wise equal within a
        tolerance.
    array_equiv: Returns True if input arrays are shape consistent and all
        elements equal.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    >>> np.array_equal([1, 2], [1, 4])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Inputs that cannot be turned into arrays are simply unequal.
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        return False
    if a1.shape != a2.shape:
        return False
    return bool(logical_and.reduce(equal(a1, a2).ravel()))
def array_equiv(a1, a2):
    """
    Returns True if input arrays are shape consistent and all elements equal.

    Shape consistent means they are either the same shape, or one input
    array can be broadcasted to create the same shape as the other one.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    out : bool
        True if equivalent, False otherwise.

    Examples
    --------
    >>> np.array_equiv([1, 2], [1, 2])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Inputs that cannot be turned into arrays are not equivalent.
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        return False
    try:
        return bool(logical_and.reduce(equal(a1, a2).ravel()))
    except ValueError:
        # Shapes that cannot be broadcast together are not equivalent.
        return False
# Map the user-facing error-mode names onto the ERR_* bit patterns
# understood by the umath error-state machinery.
_errdict = {"ignore":ERR_IGNORE,
            "warn":ERR_WARN,
            "raise":ERR_RAISE,
            "call":ERR_CALL,
            "print":ERR_PRINT,
            "log":ERR_LOG}

# Inverse mapping, used by geterr() to translate bits back into names.
_errdict_rev = {}
for key in _errdict.keys():
    _errdict_rev[_errdict[key]] = key
# Don't leave the loop variable behind as a module attribute.
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Set treatment for all types of floating-point errors at once:

        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using the `seterrcall` function.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.

        The default is not to change the current behavior.
    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for division by zero.
    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point overflow.
    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point underflow.
    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for invalid floating-point operation.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See Also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall

    Examples
    --------
    >>> old_settings = np.seterr(all='ignore')  # seterr to known value
    >>> np.seterr(over='raise')
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
     'under': 'ignore'}
    """
    pyvals = umath.geterrobj()
    old = geterr()

    # Unspecified categories fall back to `all`, or keep their old setting.
    if divide is None:
        divide = all or old['divide']
    if over is None:
        over = all or old['over']
    if under is None:
        under = all or old['under']
    if invalid is None:
        invalid = all or old['invalid']

    # Pack the four treatments into a single mask word, one 3-bit field
    # per error category.
    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
                 (_errdict[over] << SHIFT_OVERFLOW) +
                 (_errdict[under] << SHIFT_UNDERFLOW) +
                 (_errdict[invalid] << SHIFT_INVALID))
    pyvals[1] = maskvalue
    umath.seterrobj(pyvals)
    return old
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log", "warn",
        "raise", and "call".  The keys represent possible floating-point
        exceptions, and the values define how these exceptions are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions
    and treatment options, see `seterr`.
    """
    maskvalue = umath.geterrobj()[1]
    mask = 7
    res = {}
    # Each error category occupies a 3-bit field at its own shift offset
    # inside the mask word; decode each back to its mode name.
    for name, shift in (('divide', SHIFT_DIVIDEBYZERO),
                        ('over', SHIFT_OVERFLOW),
                        ('under', SHIFT_UNDERFLOW),
                        ('invalid', SHIFT_INVALID)):
        res[name] = _errdict_rev[(maskvalue >> shift) & mask]
    return res
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer.  Must be a multiple of 16, at least 16, and at
        most 10e6.

    Returns
    -------
    old : int
        The previous buffer size.

    Raises
    ------
    ValueError
        If `size` is out of range or not a multiple of 16.
    """
    # Use the call form of raise; the `raise E, arg` statement used here
    # previously is Python-2-only syntax.
    if size > 10e6:
        raise ValueError("Buffer size, %s, is too big." % size)
    if size < 5:
        raise ValueError("Buffer size, %s, is too small." % size)
    if size % 16 != 0:
        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
    pyvals = umath.geterrobj()
    old = getbufsize()
    pyvals[0] = size
    umath.seterrobj(pyvals)
    return old
def getbufsize():
    """Return the size of the buffer used in ufuncs."""
    # The buffer size is the first entry of the umath error-state object.
    pyvals = umath.geterrobj()
    return pyvals[0]
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages.  The first
    is to set the error-handler to 'call', using `seterr`.  Then, set
    the function to call using this function.

    The second is to set the error-handler to 'log', using `seterr`.
    Floating-point errors then trigger a call to the 'write' method of
    the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message
        ('log'-mode).

        The call function takes two arguments.  The first is the type of
        error (one of "divide", "over", "under", or "invalid"), and the
        second is the status flag.  The flag is a byte, whose
        least-significant bits indicate the status:
        ``flags = divide + 2*over + 4*under + 8*invalid``.

        If an object is provided, its write method should take one
        argument, a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    Raises
    ------
    ValueError
        If `func` is neither None, a callable, nor an object with a
        callable ``write`` method.

    See Also
    --------
    seterr, geterr, geterrcall
    """
    # Accept: None (clears the handler), any callable, or a "log" object
    # exposing a callable write() method.
    if func is not None and not callable(func):
        if not hasattr(func, 'write') or not callable(func.write):
            # Call form of raise; the `raise E, arg` statement used here
            # previously is Python-2-only syntax.
            raise ValueError("Only callable can be used as callback")
    pyvals = umath.geterrobj()
    old = geterrcall()
    pyvals[2] = func
    umath.seterrobj(pyvals)
    return old
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', the function
    that is called or the log instance that is written to is returned by
    `geterrcall`.  This function or log instance has been set with
    `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler.  If no handler was set through
        `seterrcall`, ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr
    """
    # The callback is the third entry of the umath error-state object.
    errobj = umath.geterrobj()
    return errobj[2]
# Sentinel used by `errstate` to tell "no `call=` keyword was supplied"
# apart from an explicit ``call=None`` (which is a valid handler value).
class _unspecified(object):
    pass
_Unspecified = _unspecified()
class errstate(object):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Using an instance of `errstate` as a context manager allows statements
    in that context to execute with a known error handling behavior.  Upon
    entering the context the error handling is set with `seterr` and
    `seterrcall`, and upon exiting it is reset to what it was before.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments.  The valid keywords are the possible
        floating-point exceptions.  Each keyword should have a string
        value that defines the treatment for the particular error.
        Possible values are
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    The ``with`` statement was introduced in Python 2.5, and can only be
    used there by importing it: ``from __future__ import with_statement``.
    In earlier Python versions the ``with`` statement is not available.

    For complete documentation of the types of floating-point exceptions
    and treatment options, see `seterr`.

    Examples
    --------
    >>> from __future__ import with_statement  # use 'with' in Python 2.5
    >>> olderr = np.seterr(all='ignore')  # Set error handling to known state.
    >>> with np.errstate(divide='warn'):
    ...     np.arange(3) / 0.
    ...
    __main__:2: RuntimeWarning: divide by zero encountered in divide
    array([ NaN,  Inf,  Inf])
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    FloatingPointError: invalid value encountered in sqrt

    Outside the context the error handling behavior has not changed:

    >>> np.geterr()
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
    'under': 'ignore'}
    """
    # Note that we don't want to run the above doctests because they will fail
    # without a from __future__ import with_statement
    def __init__(self, **kwargs):
        # `call` may legitimately be None (clears the handler), so the
        # _Unspecified sentinel distinguishes "not supplied" from None.
        self.call = kwargs.pop('call',_Unspecified)
        self.kwargs = kwargs
    def __enter__(self):
        # Apply the requested treatment, remembering the previous state
        # so __exit__ can restore it.
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)
    def __exit__(self, *exc_info):
        # Restore the saved state unconditionally, even if the body raised.
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
def _setdef():
    # Reset the ufunc error-state object to its built-in defaults:
    # default buffer size, default error mask, no callback.
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
    umath.seterrobj(defval)

# set the default values
_setdef()

# Convenient aliases for the IEEE special values.
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN

# Array-scalar booleans, exported as numpy.False_ / numpy.True_.
False_ = bool_(False)
True_ = bool_(True)

import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| {
"repo_name": "qsnake/numpy",
"path": "numpy/core/numeric.py",
"copies": "1",
"size": "69498",
"license": "bsd-3-clause",
"hash": 4259968191210057700,
"line_mean": 27.6589690722,
"line_max": 86,
"alpha_frac": 0.5698149587,
"autogenerated": false,
"ratio": 3.561260568793236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9618681504946822,
"avg_score": 0.002478804509282809,
"num_lines": 2425
} |
# Names re-exported as the public API of this module; extend_all() below
# appends further names from umath, numerictypes and fromnumeric.
__all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
           'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype',
           'fromstring', 'fromfile', 'frombuffer','newbuffer',
           'getbuffer', 'int_asbuffer', 'where', 'argwhere',
           'concatenate', 'fastCopyAndTranspose', 'lexsort',
           'set_numeric_ops', 'can_cast',
           'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
           'isfortran', 'empty_like', 'zeros_like',
           'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
           'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
           'array2string', 'get_printoptions', 'set_printoptions',
           'array_repr', 'array_str', 'set_string_function',
           'little_endian', 'require',
           'fromiter', 'array_equal', 'array_equiv',
           'indices', 'fromfunction', 'loadtxt', 'savetxt',
           'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
           'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
           'seterr', 'geterr', 'setbufsize', 'getbufsize',
           'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
           'Inf', 'inf', 'infty', 'Infinity',
           'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
           'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS']
import sys
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
bitwise_not = invert
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
# from Fernando Perez's IPython
def zeros_like(a):
    """Return an array of zeros of the shape and typecode of a.
    If you don't explicitly need the array to be zeroed, you should instead
    use empty_like(), which is faster as it only allocates memory."""
    try:
        # Fast path for ndarrays: preserve dtype and Fortran-ness.
        # NOTE(review): a.flags.fnc is passed as the `order` argument —
        # relies on the boolean-order convention of this numpy version.
        return zeros(a.shape, a.dtype, a.flags.fnc)
    except AttributeError:
        # Not an ndarray: remember its __array_wrap__ (if any) so the
        # result can be converted back to the caller's array-like type.
        try:
            wrap = a.__array_wrap__
        except AttributeError:
            wrap = None
        a = asarray(a)
        res = zeros(a.shape, a.dtype)
        if wrap:
            res = wrap(res)
        return res
def empty_like(a):
    """Return an empty (uninitialized) array of the shape and typecode of a.
    Note that this does NOT initialize the returned array.  If you require
    your array to be initialized, you should use zeros_like().
    """
    try:
        # Fast path for ndarrays (see zeros_like for the order argument).
        return empty(a.shape, a.dtype, a.flags.fnc)
    except AttributeError:
        # Array-like input: apply its __array_wrap__ to the result, if any.
        try:
            wrap = a.__array_wrap__
        except AttributeError:
            wrap = None
        a = asarray(a)
        res = empty(a.shape, a.dtype)
        if wrap:
            res = wrap(res)
        return res
# end Fernando's utilities
def extend_all(module):
    # Append to this module's __all__ every name exported by `module`
    # (its __all__ if present, otherwise all non-underscore names) that
    # is not already listed.
    adict = {}
    for a in __all__:
        adict[a] = 1
    try:
        mall = getattr(module, '__all__')
    except AttributeError:
        # No declared export list: fall back to public attribute names.
        mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
    for a in mall:
        if a not in adict:
            __all__.append(a)
extend_all(umath)
extend_all(numerictypes)
newaxis = None
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
broadcast = multiarray.broadcast
dtype = multiarray.dtype
ufunc = type(sin)
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
empty = multiarray.empty
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
def asarray(a, dtype=None, order=None):
    """Convert `a` to a base-class ndarray.

    No copy is made when `a` is already an ndarray with a matching dtype
    and order; ndarray subclasses are downcast to plain ndarray (use
    asanyarray to keep subclasses).
    """
    return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
    """Convert `a` to an array, passing ndarray subclasses through
    unchanged (subok=1)."""
    return array(a, dtype, copy=False, order=order, subok=1)
def ascontiguousarray(a, dtype=None):
    """Return an array with the data of `a` laid out contiguously in
    C (row-major) order, with at least one dimension."""
    return array(a, dtype, order='C', copy=False, ndmin=1)
def asfortranarray(a, dtype=None):
    """Return an array with the data of `a` laid out in Fortran
    (column-major) order, with at least one dimension."""
    return array(a, dtype, order='F', copy=False, ndmin=1)
def require(a, dtype=None, requirements=None):
    """Return an ndarray of `a` that satisfies the given requirements.

    `requirements` is a sequence of flag names (case-insensitive), e.g.
    'C'/'CONTIGUOUS', 'F'/'FORTRAN', 'A', 'W', 'O', 'E'/'ENSUREARRAY'.
    A copy is made only when some requested flag is not already set.
    """
    if requirements is None:
        requirements = []
    else:
        requirements = [x.upper() for x in requirements]
    if not requirements:
        return asanyarray(a, dtype=dtype)
    # 'E' forces a base-class ndarray (no subclass pass-through).
    if 'ENSUREARRAY' in requirements or 'E' in requirements:
        subok = 0
    else:
        subok = 1
    arr = array(a, dtype=dtype, copy=False, subok=subok)
    # Choose the memory order any necessary copy will use.
    copychar = 'A'
    if 'FORTRAN' in requirements or \
       'F_CONTIGUOUS' in requirements or \
       'F' in requirements:
        copychar = 'F'
    elif 'CONTIGUOUS' in requirements or \
         'C_CONTIGUOUS' in requirements or \
         'C' in requirements:
        copychar = 'C'
    # One copy at most: the first unsatisfied flag triggers it.
    # NOTE(review): 'E'/'ENSUREARRAY' stays in the list and is looked up
    # in arr.flags here — verify the flags object accepts those keys.
    for prop in requirements:
        if not arr.flags[prop]:
            arr = arr.copy(copychar)
            break
    return arr
def isfortran(a):
    """Returns True if 'a' is arranged in Fortran-order in memory with a.ndim > 1
    """
    # flags.fnc means "F-contiguous and not C-contiguous"; 1-d arrays are
    # both, so they report False.
    return a.flags.fnc
def argwhere(a):
    """Return an (N, a.ndim) array whose rows are the indices of the
    nonzero elements of `a`.

    A row must be converted to a tuple before it can be used to index
    `a` directly.
    """
    nonzero_indices = a.nonzero()
    return asarray(nonzero_indices).transpose()
def flatnonzero(a):
    """Return the indices of the nonzero elements in the flattened
    version of `a`.

    Equivalent to ``a.ravel().nonzero()[0]``.
    """
    flat = a.ravel()
    return flat.nonzero()[0]
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a,v,mode='valid'):
    """Return the discrete, linear correlation of 1-D sequences a and v; mode
    can be 'valid', 'same', or 'full' to specify the size of the resulting
    sequence
    """
    # Translate the string mode to the integer code multiarray expects.
    mode = _mode_from_name(mode)
    return multiarray.correlate(a,v,mode)
def convolve(a,v,mode='full'):
    """Returns the discrete, linear convolution of 1-D sequences a and v; mode
    can be 'valid', 'same', or 'full' to specify size of the resulting sequence.
    """
    a,v = array(a,ndmin=1),array(v,ndmin=1)
    # Convolution is commutative; make `a` the longer sequence so the
    # mode semantics are applied consistently.
    if (len(v) > len(a)):
        a, v = v, a
    assert len(a) > 0, 'a cannot be empty'
    assert len(v) > 0, 'v cannot be empty'
    mode = _mode_from_name(mode)
    # Convolution == correlation with the second sequence reversed.
    return multiarray.correlate(a,asarray(v)[::-1],mode)
inner = multiarray.inner
dot = multiarray.dot
def outer(a, b):
    """Return the outer product of two vectors.

    out[i, j] = a[i] * b[j] for 1-d inputs; any argument that can be
    flattened to a vector is accepted.
    """
    av = asarray(a).ravel()
    bv = asarray(b).ravel()
    return av[:, newaxis] * bv[newaxis, :]
def vdot(a, b):
    """Return the dot product of two vectors, conjugating the first.

    Unlike `dot`, both arguments are flattened to 1-d, the complex
    conjugate of the first is used, and a scalar is always returned.
    """
    av = asarray(a).ravel()
    bv = asarray(b).ravel()
    return dot(av.conj(), bv)
# try to import blas optimized dot if available
try:
    # importing this changes the dot function for basic 4 types
    # to blas-optimized versions.
    from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
    # No BLAS extension built: keep the multiarray implementations and
    # provide no-op stand-ins for the alterdot/restoredot toggles so the
    # public API is the same either way.
    def alterdot():
        pass
    def restoredot():
        pass
def tensordot(a, b, axes=2):
    """tensordot returns the product for any (ndim >= 1) arrays.
    r_{xxx, yyy} = \sum_k a_{xxx,k} b_{k,yyy} where
    the axes to be summed over are given by the axes argument.
    the first element of the sequence determines the axis or axes
    in arr1 to sum over, and the second element in axes argument sequence
    determines the axis or axes in arr2 to sum over.
    When there is more than one axis to sum over, the corresponding
    arguments to axes should be sequences of the same length with the first
    axis to sum over given first in both sequences, the second axis second,
    and so forth.
    If the axes argument is an integer, N, then the last N dimensions of a
    and first N dimensions of b are summed over.
    """
    # Normalize `axes` into two lists: axes of `a` and axes of `b` to
    # contract.  An integer N means last N of `a` against first N of `b`.
    try:
        iter(axes)
    except:
        axes_a = range(-axes,0)
        axes_b = range(0,axes)
    else:
        axes_a, axes_b = axes
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1
    a, b = asarray(a), asarray(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)
    # Check the contracted dimensions match pairwise, normalizing
    # negative axis indices along the way.
    equal = 1
    if (na != nb): equal = 0
    else:
        for k in xrange(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = 0
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError, "shape-mismatch for sum"
    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]
    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]
    # Collapse each operand to 2-d so the contraction becomes a single
    # matrix product, then restore the free dimensions on the result.
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
    """Circularly shift the elements of `a` by `shift` positions along
    `axis`.

    With axis=None the array is rolled over its flattened form and the
    original shape is restored on the result.
    """
    a = asanyarray(a)
    if axis is None:
        n = a.size
        restore_shape = True
    else:
        n = a.shape[axis]
        restore_shape = False
    shift %= n
    # Index vector that rotates positions by `shift`.
    idx = concatenate((arange(n - shift, n), arange(n - shift)))
    rolled = a.take(idx, axis)
    if restore_shape:
        rolled = rolled.reshape(a.shape)
    return rolled
def rollaxis(a, axis, start=0):
    """Return transposed array so that axis is rolled before start.
    if a.shape is (3,4,5,6)
    rollaxis(a, 3, 1).shape is (3,6,4,5)
    rollaxis(a, 2, 0).shape is (5,3,4,6)
    rollaxis(a, 1, 3).shape is (3,5,4,6)
    rollaxis(a, 1, 4).shape is (3,5,6,4)
    """
    n = a.ndim
    # Normalize negative indices before range-checking.
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    if not (0 <= axis < n):
        raise ValueError, msg % ('axis', axis, n)
    if not (0 <= start < n+1):
        raise ValueError, msg % ('start', start, n+1)
    if (axis < start): # it's been removed
        start -= 1
    if axis==start:
        # Already in place: return the input unchanged (no copy).
        return a
    # Build the permutation by plucking `axis` out and re-inserting it
    # at `start` (Py2: range() yields a mutable list).
    axes = range(0,n)
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
    # Backward-compatibility shim kept only because scipy imports this
    # private helper; equivalent to rollaxis(a, axis, 0).
    return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Return the cross product of two (arrays of) vectors.
    The cross product is performed over the last axis of a and b by default,
    and can handle axes with dimensions 2 and 3. For a dimension of 2,
    the z-component of the equivalent three-dimensional cross product is
    returned.
    """
    # A single `axis` overrides all three per-array axis arguments.
    if axis is not None:
        axisa,axisb,axisc=(axis,)*3
    # Bring the vector components to the front so a[0], a[1], a[2] index
    # the x/y/z components regardless of the requested axis.
    a = asarray(a).swapaxes(axisa, 0)
    b = asarray(b).swapaxes(axisb, 0)
    msg = "incompatible dimensions for cross product\n"\
          "(dimension must be 2 or 3)"
    if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
        raise ValueError(msg)
    if a.shape[0] == 2:
        if (b.shape[0] == 2):
            # 2-d x 2-d: only the scalar z-component exists.
            cp = a[0]*b[1] - a[1]*b[0]
            if cp.ndim == 0:
                return cp
            else:
                return cp.swapaxes(0, axisc)
        else:
            # 2-d x 3-d: treat a's missing z-component as zero.
            x = a[1]*b[2]
            y = -a[0]*b[2]
            z = a[0]*b[1] - a[1]*b[0]
    elif a.shape[0] == 3:
        if (b.shape[0] == 3):
            # Full 3-d cross product.
            x = a[1]*b[2] - a[2]*b[1]
            y = a[2]*b[0] - a[0]*b[2]
            z = a[0]*b[1] - a[1]*b[0]
        else:
            # 3-d x 2-d: treat b's missing z-component as zero.
            x = -a[2]*b[1]
            y = a[2]*b[0]
            z = a[0]*b[1] - a[1]*b[0]
    cp = array([x,y,z])
    if cp.ndim == 1:
        return cp
    else:
        # Move the component axis back to where the caller asked for it.
        return cp.swapaxes(0,axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """Return the repr() string for an array: "array(...)" (or the
    subclass name), appending ", dtype=..." unless the dtype is one of
    the typeless defaults and the array is non-empty."""
    if arr.size > 0 or arr.shape==(0,):
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', "array(")
    else: # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(arr.shape),)
    typeless = arr.dtype.type in _typelessdata
    if arr.__class__ is not ndarray:
        cName= arr.__class__.__name__
    else:
        cName = "array"
    if typeless and arr.size:
        return cName + "(%s)" % lst
    else:
        typename=arr.dtype.name
        lf = ''
        if issubclass(arr.dtype.type, flexible):
            # Flexible dtypes spell out the full dtype (quoted unless it
            # is a structured dtype with field names) on its own line.
            if arr.dtype.names:
                typename = "%s" % str(arr.dtype)
            else:
                typename = "'%s'" % str(arr.dtype)
            lf = '\n'+' '*len("array(")
        return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    # str() form: space-separated elements, no "array(...)" wrapper;
    # delegates to array2string with str as the scalar formatter.
    return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
set_string_function = multiarray.set_string_function
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
    """Returns an array representing a grid of indices with row-only, and
    column-only variation.
    """
    dimensions = tuple(dimensions)
    N = len(dimensions)
    if N == 0:
        # Degenerate case: no dimensions, empty result.
        return array([],dtype=dtype)
    res = empty((N,)+dimensions, dtype=dtype)
    for i, dim in enumerate(dimensions):
        # arange along axis i, shaped so it broadcasts across the others.
        tmp = arange(dim,dtype=dtype)
        tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
        newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
        val = zeros(newdim, dtype)
        # Broadcasting add writes the full index grid into res[i].
        add(tmp, val, res[i])
    return res
def fromfunction(function, shape, **kwargs):
    """Build an array by calling `function` on index grids.

    `function` is invoked once with len(shape) array arguments, the
    i-th being the grid of indices along axis i (dtype taken from the
    optional `dtype` keyword, default float).  Remaining keyword
    arguments are forwarded to `function` untouched.
    """
    dtype = kwargs.pop('dtype', float)
    grids = indices(shape, dtype=dtype)
    return function(*grids, **kwargs)
def isscalar(num):
    """Return True when `num` is of a scalar type: either a numpy
    generic scalar instance, or an object of one of the registered
    Python scalar types in ScalarType."""
    return isinstance(num, generic) or type(num) in ScalarType
# Hex-digit -> 4-bit-string lookup used by binary_repr: each hex digit of
# hex(num) expands to its nibble.  The 'L' entry swallows the long-integer
# suffix that Python 2's hex() appends.
_lkup = {
    '0':'0000',
    '1':'0001',
    '2':'0010',
    '3':'0011',
    '4':'0100',
    '5':'0101',
    '6':'0110',
    '7':'0111',
    '8':'1000',
    '9':'1001',
    'a':'1010',
    'b':'1011',
    'c':'1100',
    'd':'1101',
    'e':'1110',
    'f':'1111',
    'A':'1010',
    'B':'1011',
    'C':'1100',
    'D':'1101',
    'E':'1110',
    'F':'1111',
    'L':''}
def binary_repr(num, width=None):
    """Return the binary representation of the input number as a string.

    For negative numbers, if width is not given, a - sign is added to the
    front. If width is given, the two's complement of the number is
    returned, with respect to that width.

    Note: binary_repr(0, width) returns '0' regardless of width
    (historical behavior, preserved for compatibility).
    """
    sign = ''
    if num < 0:
        if width is None:
            sign = '-'
            num = -num
        else:
            # replace num with its 2-complement
            num = 2**width + num
    elif num == 0:
        return '0'
    # format() emits the minimal binary digits directly, replacing the
    # old hex-string + module-global nibble table round trip (which also
    # needed a special entry for Python 2's 'L' long suffix).
    bin = format(num, 'b')
    if width is not None:
        bin = bin.zfill(width)
    return sign + bin
def base_repr (number, base=2, padding=0):
    """Return the representation of a number in the given base.
    Base can't be larger than 36.

    `padding` zero-characters are prepended to the result.  Only
    non-negative integers are supported.
    """
    if number < 0:
        raise ValueError("negative numbers not handled in base_repr")
    if base > 36:
        raise ValueError("bases greater than 36 not handled in base_repr")
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    import math
    lnb = math.log(base)
    res = padding*chars[0]
    if number == 0:
        return res + chars[0]
    # Highest power of `base` not exceeding `number`.
    # NOTE(review): computed via float logarithms — rounding could
    # mis-estimate the exponent for very large inputs; verify if used
    # beyond small integers.
    exponent = int (math.log (number)/lnb)
    while(exponent >= 0):
        # Peel off one digit per iteration, most significant first
        # (Py2 `long` keeps the power exact for big values).
        term = long(base)**exponent
        lead_digit = int(number / term)
        res += chars[lead_digit]
        number -= term*lead_digit
        exponent -= 1
    return res
from cPickle import load, loads
# Stash the originals before `load` is redefined below: the wrapper
# needs cPickle.load, and its parameter shadows the builtin `file`.
_cload = load
_file = file
def load(file):
    """Wrapper around cPickle.load which accepts either a file-like object or
    a filename.
    """
    if isinstance(file, type("")):
        # Filename given: open it in binary mode for unpickling.
        file = _file(file,"rb")
    return _cload(file)
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, bool_):
return lambda x: bool(int(x))
if issubclass(typ, integer):
return int
elif issubclass(typ, floating):
return float
elif issubclass(typ, complex):
return complex
else:
return str
def _string_like(obj):
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
            skiprows=0, usecols=None, unpack=False):
    """
    Load ASCII data from fname into an array and return the array.
    The data must be regular, same number of values in every row
    fname can be a filename or a file handle.  Support for gzipped files is
    automatic, if the filename ends in .gz
    See scipy.loadmat to read and write matfiles.
    Example usage:
      X = loadtxt('test.dat')  # data in two columns
      t = X[:,0]
      y = X[:,1]
    Alternatively, you can do the same with "unpack"; see below
      X = loadtxt('test.dat')    # a matrix of data
      x = loadtxt('test.dat')    # a single column of data
    dtype - the data-type of the resulting array.  If this is a
    record data-type, the resulting array will be 1-d and each row will
    be interpreted as an element of the array.  The number of columns
    used must match the number of fields in the data-type in this case.
    comments - the character used to indicate the start of a comment
    in the file
    delimiter is a string-like character used to separate values in the
    file. If delimiter is unspecified or none, any whitespace string is
    a separator.
    converters, if not None, is a dictionary mapping column number to
    a function that will convert that column to a float.  Eg, if
    column 0 is a date string: converters={0:datestr2num}
    skiprows is the number of rows from the top to skip
    usecols, if not None, is a sequence of integer column indexes to
    extract where 0 is the first column, eg usecols=(1,4,5) to extract
    just the 2nd, 5th and 6th columns
    unpack, if True, will transpose the matrix allowing you to unpack
    into named arguments on the left hand side
    t,y = load('test.dat', unpack=True) # for two column data
    x,y,z = load('somefile.dat', usecols=(3,5,7), unpack=True)
    """
    # Resolve fname into an open file handle (gzip-aware).
    if _string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname)
        else:
            fh = file(fname)
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    X = []
    dtype = multiarray.dtype(dtype)
    defconv = _getconv(dtype)
    converterseq = None
    if converters is None:
        converters = {}
    # Record dtype: one converter per field, in field order.
    if dtype.names is not None:
        converterseq = [_getconv(dtype.fields[name][0]) \
                        for name in dtype.names]
    for i,line in enumerate(fh):
        if i<skiprows: continue
        # NOTE(review): find() returns -1 when no comment marker is
        # present, so this slice drops the line's final character —
        # normally just the trailing newline, but it loses real data on
        # a last line without one.
        line = line[:line.find(comments)].strip()
        if not len(line): continue
        vals = line.split(delimiter)
        if converterseq is None:
            # Column converters are fixed from the first data line.
            converterseq = [converters.get(j,defconv) \
                            for j in xrange(len(vals))]
        if usecols is not None:
            row = [converterseq[j](vals[j]) for j in usecols]
        else:
            row = [converterseq[j](val) for j,val in enumerate(vals)]
        if dtype.names is not None:
            # Record arrays require each row to be a tuple.
            row = tuple(row)
        X.append(row)
    X = array(X, dtype)
    # Collapse single-row/single-column results to 1-d.
    r,c = X.shape
    if r==1 or c==1:
        X.shape = max([r,c]),
    if unpack: return X.T
    else:  return X
# adjust so that fmt can change across columns if desired.
def savetxt(fname, X, fmt='%.18e',delimiter=' '):
    """
    Save the data in X to file fname using fmt string to convert the
    data to strings
    fname can be a filename or a file handle.  If the filename ends in .gz,
    the file is automatically saved in compressed gzip format.  The load()
    command understands gzipped files transparently.
    Example usage:
    save('test.out', X)         # X is an array
    save('test1.out', (x,y,z))  # x,y,z equal sized 1D arrays
    save('test2.out', x)        # x is 1D
    save('test3.out', x, fmt='%1.4e')  # use exponential notation
    delimiter is used to separate the fields, eg delimiter ',' for
    comma-separated values
    """
    # Resolve fname into a writable file handle (gzip-aware).
    if _string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname,'wb')
        else:
            fh = file(fname,'w')
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    X = asarray(X)
    # Temporarily view 1-d input as a single column so the row loop
    # below works uniformly; the original shape is restored afterwards
    # because the reshape mutates the caller's array in place.
    origShape = None
    if len(X.shape)==1:
        origShape = X.shape
        X.shape = len(X), 1
    for row in X:
        fh.write(delimiter.join([fmt%val for val in row]) + '\n')
    if origShape is not None:
        X.shape = origShape
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
    """Return a new array of the given shape, dtype and order with every
    element initialized to one."""
    out = empty(shape, dtype, order)
    try:
        # fill() is the fast path for plain scalar dtypes.
        out.fill(1)
    except TypeError:
        # Structured dtype: fill with a matching tuple of ones.
        out.fill(_maketup(dtype, 1))
    return out
def identity(n, dtype=None):
    """Return the n x n identity array: ones on the main diagonal,
    zeros elsewhere."""
    pattern = array([1] + n * [0], dtype=dtype)
    out = empty((n, n), dtype=dtype)
    # Assigning the length-(n+1) pattern through the flat iterator
    # repeats it cyclically over the n*n elements, which lands the ones
    # exactly on the main diagonal.
    out.flat = pattern
    return out
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
    """Return True when every element of `a` and `b` is equal within
    tolerance.

    The test applied is |a - b| <= atol + rtol * |b|.  Infinities must
    occur at the same positions with the same sign.  rtol should be
    positive and << 1; atol governs elements of b near zero.
    """
    x = array(a, copy=False)
    y = array(b, copy=False)
    xinf = isinf(x)
    # Infinity positions must agree exactly.
    if not all(xinf == isinf(y)):
        return False
    if any(xinf):
        if not all(x[xinf] == y[xinf]):
            return False
        # Compare only the finite entries below.
        x = x[~xinf]
        y = y[~xinf]
    return all(less_equal(absolute(x - y), atol + rtol * absolute(y)))
def array_equal(a1, a2):
    """Return a true value when a1 and a2 have the same shape and all
    elements compare equal; otherwise 0.  Inputs that cannot be coerced
    to arrays compare unequal."""
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed during coercion.
        return 0
    if a1.shape != a2.shape:
        return 0
    return logical_and.reduce(equal(a1,a2).ravel())
def array_equiv(a1, a2):
    """Return a true value when a1 and a2 are shape-consistent
    (broadcastable against each other) and all corresponding elements
    compare equal; otherwise 0."""
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed during coercion.
        return 0
    try:
        return logical_and.reduce(equal(a1,a2).ravel())
    except ValueError:
        # Shapes are not broadcast-compatible.
        return 0
_errdict = {"ignore":ERR_IGNORE,
"warn":ERR_WARN,
"raise":ERR_RAISE,
"call":ERR_CALL,
"print":ERR_PRINT,
"log":ERR_LOG}
_errdict_rev = {}
for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """Set how floating-point errors are handled.
    Valid values for each type of error are the strings
    "ignore", "warn", "raise", and "call". Returns the old settings.
    If 'all' is specified, values that are not otherwise specified
    will be set to 'all', otherwise they will retain their old
    values.
    Note that operations on integer scalar types (such as int16) are
    handled like floating point, and are affected by these settings.
    Example:
    >>> seterr(over='raise') # doctest: +SKIP
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
    >>> seterr(all='warn', over='raise') # doctest: +SKIP
    {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
    >>> int16(32000) * int16(3) # doctest: +SKIP
    Traceback (most recent call last):
          File "<stdin>", line 1, in ?
    FloatingPointError: overflow encountered in short_scalars
    >>> seterr(all='ignore') # doctest: +SKIP
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
    """
    pyvals = umath.geterrobj()
    old = geterr()
    # `all` (which shadows the builtin here) is the fallback for any
    # category not given explicitly; otherwise the old setting is kept.
    if divide is None: divide = all or old['divide']
    if over is None: over = all or old['over']
    if under is None: under = all or old['under']
    if invalid is None: invalid = all or old['invalid']
    # Pack the four 3-bit category codes into a single mask word.
    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
                 (_errdict[over] << SHIFT_OVERFLOW ) +
                 (_errdict[under] << SHIFT_UNDERFLOW) +
                 (_errdict[invalid] << SHIFT_INVALID))
    # Slot 1 of the errobj is the error mask.
    pyvals[1] = maskvalue
    umath.seterrobj(pyvals)
    return old
def geterr():
    """Get the current way of handling floating-point errors.
    Returns a dictionary with entries "divide", "over", "under", and
    "invalid", whose values are from the strings
    "ignore", "print", "log", "warn", "raise", and "call".
    """
    maskvalue = umath.geterrobj()[1]
    # Each error category occupies 3 bits in the mask word.
    mask = 7
    res = {}
    val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
    res['divide'] = _errdict_rev[val]
    val = (maskvalue >> SHIFT_OVERFLOW) & mask
    res['over'] = _errdict_rev[val]
    val = (maskvalue >> SHIFT_UNDERFLOW) & mask
    res['under'] = _errdict_rev[val]
    val = (maskvalue >> SHIFT_INVALID) & mask
    res['invalid'] = _errdict_rev[val]
    return res
def setbufsize(size):
    """Set the size of the buffer used in ufuncs.

    `size` must be a multiple of 16 in (5, 10e6); returns the previous
    buffer size.
    """
    if size > 10e6:
        raise ValueError, "Buffer size, %s, is too big." % size
    if size < 5:
        raise ValueError, "Buffer size, %s, is too small." %size
    if size % 16 != 0:
        raise ValueError, "Buffer size, %s, is not a multiple of 16." %size
    pyvals = umath.geterrobj()
    old = getbufsize()
    # Slot 0 of the errobj is the buffer size.
    pyvals[0] = size
    umath.seterrobj(pyvals)
    return old
def getbufsize():
    """Return the size of the buffer used in ufuncs.
    """
    # Slot 0 of the errobj holds the buffer size.
    return umath.geterrobj()[0]
def seterrcall(func):
    """Set the callback function used when a floating-point error handler
    is set to 'call' or the object with a write method for use when
    the floating-point error handler is set to 'log'
    'func' should be a function that takes two arguments. The first is
    type of error ("divide", "over", "under", or "invalid"), and the second
    is the status flag (= divide + 2*over + 4*under + 8*invalid).
    Returns the old handler.
    """
    # Accept either a callable (for 'call') or an object with a callable
    # write method (for 'log'); anything else is rejected.
    if func is not None and not callable(func):
        if not hasattr(func, 'write') or not callable(func.write):
            raise ValueError, "Only callable can be used as callback"
    pyvals = umath.geterrobj()
    old = geterrcall()
    # Slot 2 of the errobj is the callback / log-writer object.
    pyvals[2] = func
    umath.seterrobj(pyvals)
    return old
def geterrcall():
    """Return the current callback function used on floating-point errors.
    """
    # Slot 2 of the errobj holds the callback (or log-writer) object.
    return umath.geterrobj()[2]
class _unspecified(object):
    # Sentinel type: lets errstate distinguish "call= omitted" from
    # call=None, because None is itself a valid seterrcall value.
    pass
_Unspecified = _unspecified()
class errstate(object):
    """with errstate(**state): --> operations in following block use given state.
    # Set error handling to known state.
    >>> _ = seterr(invalid='raise', divide='raise', over='raise', under='ignore')
    |>> a = -arange(3)
    |>> with errstate(invalid='ignore'):
    ...     print sqrt(a)
    [ 0.     -1.#IND -1.#IND]
    |>> print sqrt(a.astype(complex))
    [ 0.+0.00000000e+00j  0.+1.00000000e+00j  0.+1.41421356e+00j]
    |>> print sqrt(a)
    Traceback (most recent call last):
     ...
    FloatingPointError: invalid encountered in sqrt
    |>> with errstate(divide='ignore'):
    ...     print a/0
    [0 0 0]
    |>> print a/0
    Traceback (most recent call last):
        ...
    FloatingPointError: divide by zero encountered in divide
    """
    # Note that we don't want to run the above doctests because they will fail
    # without a from __future__ import with_statement
    def __init__(self, **kwargs):
        # `call` is extracted separately; the rest are seterr categories.
        self.call = kwargs.pop('call',_Unspecified)
        self.kwargs = kwargs
    def __enter__(self):
        # Save the old state so __exit__ can restore it.
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)
    def __exit__(self, *exc_info):
        # Restore error settings (and callback, if one was installed).
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
def _setdef():
    """Reset the ufunc error-state object to its defaults.

    The errobj layout is dictated by umath: [buffer size, error mask,
    callback-or-log-writer].
    """
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
    umath.seterrobj(defval)
# set the default values
_setdef()
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/core/numeric.py",
"copies": "1",
"size": "31685",
"license": "bsd-3-clause",
"hash": -8775957641127733000,
"line_mean": 29.1474785918,
"line_max": 83,
"alpha_frac": 0.6004734101,
"autogenerated": false,
"ratio": 3.4864656690140845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4586939079114084,
"avg_score": null,
"num_lines": null
} |
__all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer',
'int_asbuffer', 'where', 'argwhere',
'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast',
'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
'isfortran', 'empty_like', 'zeros_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
'array2string', 'get_printoptions', 'set_printoptions',
'array_repr', 'array_str', 'set_string_function',
'little_endian', 'require',
'fromiter', 'array_equal', 'array_equiv',
'indices', 'fromfunction',
'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
'seterr', 'geterr', 'setbufsize', 'getbufsize',
'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
'Inf', 'inf', 'infty', 'Infinity',
'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
'ComplexWarning']
import sys
import warnings
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
    """
    The warning raised when casting a complex dtype to a real dtype.
    As implemented, casting a complex number to a real discards its imaginary
    part, but this behavior may not be what the user actually wants.
    """
    # Subclasses RuntimeWarning so existing warning filters that target
    # RuntimeWarning keep applying to it.
    pass
bitwise_not = invert
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
broadcast = multiarray.broadcast
dtype = multiarray.dtype
ufunc = type(sin)
# originally from Fernando Perez's IPython
def zeros_like(a):
    """
    Return an array of zeros with the same shape and type as a given array.

    Equivalent to ``a.copy().fill(0)``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define those of the result.

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    ones_like, empty_like, zeros, ones, empty

    Examples
    --------
    >>> np.zeros_like(np.arange(6).reshape((2, 3)))
    array([[0, 0, 0],
           [0, 0, 0]])
    """
    if isinstance(a, ndarray):
        # ndarray (or subclass): allocate with the same class, dtype and
        # memory order, then clear it.
        out = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
        out.fill(0)
        return out
    # Generic array-like: honour its __array_wrap__, if any.
    try:
        wrap = a.__array_wrap__
    except AttributeError:
        wrap = None
    proto = asarray(a)
    out = zeros(proto.shape, proto.dtype)
    if wrap:
        out = wrap(out)
    return out
def empty_like(a):
    """
    Return a new, uninitialized array with the same shape and type as `a`.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define those of the result.

    Returns
    -------
    out : ndarray
        Uninitialized array with the same shape and type as `a`; its
        contents are arbitrary.

    See Also
    --------
    zeros_like, ones_like, empty, zeros, ones

    Notes
    -----
    This function does *not* initialize the returned array; use
    `zeros_like` or `ones_like` when defined contents are required.
    """
    if isinstance(a, ndarray):
        # ndarray (or subclass): same class, dtype and memory order,
        # contents left uninitialized.
        return ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
    # Generic array-like: honour its __array_wrap__, if any.
    try:
        wrap = a.__array_wrap__
    except AttributeError:
        wrap = None
    proto = asarray(a)
    out = empty(proto.shape, proto.dtype)
    if wrap:
        out = wrap(out)
    return out
# end Fernando's utilities
def extend_all(module):
    # Append to this module's __all__ every name exported by `module`
    # (its __all__ if declared, otherwise every non-underscore attribute)
    # that is not already listed.
    existing = dict.fromkeys(__all__, 1)
    try:
        exported = module.__all__
    except AttributeError:
        exported = [k for k in module.__dict__.keys() if not k.startswith('_')]
    for name in exported:
        if name not in existing:
            __all__.append(name)
extend_all(umath)
extend_all(numerictypes)
newaxis = None
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
empty = multiarray.empty
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
if sys.version_info[0] < 3:
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
def asarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array:
        lists, tuples, nested sequences of these, or ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Row-major ('C', the default) or column-major ('F') memory
        representation.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the input
        is already a matching ndarray.  Unlike `asanyarray`, subclasses
        of ndarray are converted to base-class ndarrays.

    See Also
    --------
    asanyarray, ascontiguousarray, asfortranarray, asarray_chkfinite
    """
    # copy=False makes this a no-op for already-compatible ndarray input.
    return array(a, dtype=dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray, but pass ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Input data in any form convertible to an array: scalars, lists,
        tuples, nested sequences of these, or ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Row-major ('C', the default) or column-major ('F') memory
        representation.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`.  If `a` is already an ndarray or a
        subclass thereof, it is returned as-is with no copy.

    See Also
    --------
    asarray, ascontiguousarray, asfortranarray
    """
    # subok=True is what lets ndarray subclasses (e.g. matrix) through.
    return array(a, dtype=dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type
        `dtype` if specified; at least 1-dimensional.

    See Also
    --------
    asfortranarray, require, ndarray.flags
    """
    # ndmin=1 promotes scalars to 1-D arrays so the result always has a
    # contiguity flag that makes sense.
    return array(a, dtype=dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
    """
    Return an array laid out in Fortran (column-major) order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order; at least
        1-dimensional.

    See Also
    --------
    ascontiguousarray, asanyarray, require, ndarray.flags
    """
    # ndmin=1 promotes scalars to 1-D arrays so the result always has a
    # contiguity flag that makes sense.
    return array(a, dtype=dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    This function is useful to be sure that an array with the correct
    flags is returned for passing to compiled code (perhaps through
    ctypes).

    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying
        array.
    dtype : data-type, optional
        The required data-type; by default it is inferred from `a`.
    requirements : str or list of str, optional
        The requirements list can be any of the following

        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A') - ensure a data-type aligned array
        * 'WRITEABLE' ('W') - ensure a writable array
        * 'OWNDATA' ('O') - ensure an array that owns its own data
        * 'ENSUREARRAY' ('E') - ensure a base-class ndarray (no subclass)

    Returns
    -------
    out : ndarray
        Array satisfying the requirements, copying `a` only if needed.

    Notes
    -----
    The returned array will be guaranteed to have the listed requirements
    by making a copy if needed.
    """
    if requirements is None:
        requirements = []
    else:
        requirements = [x.upper() for x in requirements]
    if not requirements:
        return asanyarray(a, dtype=dtype)
    if 'ENSUREARRAY' in requirements or 'E' in requirements:
        subok = False
        # BUGFIX: 'E'/'ENSUREARRAY' is not an ndarray flag key, so it
        # must not reach the flag-checking loop below (arr.flags['E']
        # would raise KeyError); it only controls `subok`.
        requirements = [x for x in requirements
                        if x not in ('E', 'ENSUREARRAY')]
    else:
        subok = True
    arr = array(a, dtype=dtype, copy=False, subok=subok)
    # Memory order a copy (if one is needed) should use; 'A' means
    # "keep whatever order the array already has".
    copychar = 'A'
    if 'FORTRAN' in requirements or \
       'F_CONTIGUOUS' in requirements or \
       'F' in requirements:
        copychar = 'F'
    elif 'CONTIGUOUS' in requirements or \
         'C_CONTIGUOUS' in requirements or \
         'C' in requirements:
        copychar = 'C'
    # At most one copy: a fresh copy in the chosen order satisfies every
    # requirement at once, so stop at the first unmet flag.
    for prop in requirements:
        if not arr.flags[prop]:
            arr = arr.copy(copychar)
            break
    return arr
def isfortran(a):
    """
    Return True if the array is Fortran-ordered in memory and has more
    than one dimension.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    bool
        True for multi-dimensional arrays stored in column-major
        (Fortran) order, e.g. arrays created with ``order='F'`` or the
        transpose of a C-ordered 2-D array.  1-D arrays always evaluate
        as False.
    """
    # `fnc` means "Fortran-contiguous and Not C-contiguous"; 1-D arrays
    # are both, hence always False for them.
    flags = a.flags
    return flags.fnc
def argwhere(a):
    """
    Find the indices of array elements that are non-zero, grouped by
    element.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    index_array : ndarray
        Indices of elements that are non-zero, one row per element.

    See Also
    --------
    where, nonzero

    Notes
    -----
    ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
    The output of ``argwhere`` is not suitable for indexing arrays;
    for that purpose use ``where(a)`` instead.
    """
    # nonzero() gives one index array per dimension; transposing groups
    # the per-dimension indices into one row per non-zero element.
    nonzero_indices = asanyarray(a).nonzero()
    return transpose(nonzero_indices)
def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of `a`.

    This is equivalent to ``a.ravel().nonzero()[0]``.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    res : ndarray
        Indices of the non-zero elements of ``a.ravel()``.

    See Also
    --------
    nonzero, ravel
    """
    # nonzero() on a 1-D array returns a 1-tuple of index arrays.
    flat = a.ravel()
    return flat.nonzero()[0]
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a, v, mode='valid', old_behavior=True):
    """
    Discrete, linear correlation of two 1-dimensional sequences.

    Equivalent to ``np.convolve(a, v[::-1], mode=mode)``, where
    ``v[::-1]`` is the reverse of `v`.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `convolve` docstring.  Note that the default is
        'valid', unlike `convolve`, which uses 'full'.
    old_behavior : bool
        If True (default), use the old numeric behavior:
        ``correlate(a, v) == correlate(v, a)`` and no conjugate is taken
        for complex arrays.  If False, use the conventional signal
        processing definition ``z[k] = sum_n a[n] * conj(v[n+k])``.

    Returns
    -------
    out : ndarray
        Discrete cross-correlation of `a` and `v`.

    See Also
    --------
    convolve, acorrelate
    """
    mode = _mode_from_name(mode)
    if not old_behavior:
        # Conventional signal-processing definition.
        return multiarray.correlate2(a, v, mode)
    # Legacy behavior: warn, then delegate to the old implementation.
    warnings.warn("""
The current behavior of correlate is deprecated for 1.4.0, and will be removed
for NumPy 1.5.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
                  DeprecationWarning)
    return multiarray.correlate(a, v, mode)
def convolve(a, v, mode='full'):
    """
    Return the discrete, linear convolution of two one-dimensional
    sequences.

    The convolution operator is often seen in signal processing, where
    it models the effect of a linear time-invariant system on a signal;
    in probability theory, the sum of two independent random variables
    is distributed according to the convolution of their distributions.

    Parameters
    ----------
    a : (N,) array_like
        First one-dimensional input array.
    v : (M,) array_like
        Second one-dimensional input array.
    mode : {'full', 'valid', 'same'}, optional
        'full' (default) returns the convolution at every point of
        overlap, with shape ``(N+M-1,)``; boundary effects may be seen
        at the end-points.  'same' returns output of length
        ``max(M, N)``, with boundary effects still visible.  'valid'
        returns only the points where the signals overlap completely,
        length ``max(M, N) - min(M, N) + 1``.

    Returns
    -------
    out : ndarray
        Discrete, linear convolution of `a` and `v`.

    Raises
    ------
    ValueError
        If either input is empty.

    Notes
    -----
    Convolution is correlation with the second sequence reversed, which
    is exactly how it is computed here.
    """
    a = array(a, ndmin=1)
    v = array(v, ndmin=1)
    # Convolution is symmetric; always slide the shorter sequence over
    # the longer one.
    if len(v) > len(a):
        a, v = v, a
    if len(a) == 0:
        raise ValueError('a cannot be empty')
    if len(v) == 0:
        raise ValueError('v cannot be empty')
    return multiarray.correlate(a, v[::-1], _mode_from_name(mode))
def outer(a, b):
    """
    Compute the outer product of two vectors.

    Given ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``, the
    result is the matrix whose ``[i, j]`` entry is ``a[i] * b[j]``.

    Parameters
    ----------
    a, b : array_like, shape (M,), (N,)
        First and second input vectors.  Inputs are flattened if they
        are not already 1-dimensional.

    Returns
    -------
    out : ndarray, shape (M, N)
        ``out[i, j] = a[i] * b[j]``

    References
    ----------
    .. [1] G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
           ed., Baltimore, MD, Johns Hopkins University Press, 1996,
           pg. 8.
    """
    # Broadcast a column vector against a row vector.
    col = asarray(a).ravel()[:, newaxis]
    row = asarray(b).ravel()[newaxis, :]
    return col * row
# try to import blas optimized dot if available
try:
    # importing this changes the dot function for basic 4 types
    # to blas-optimized versions.
    from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
    # No BLAS extension available: fall back to the plain multiarray
    # implementations and provide pure-Python stand-ins for the rest.
    # docstrings are in add_newdocs.py
    inner = multiarray.inner
    dot = multiarray.dot
    def vdot(a, b):
        # Vector dot product: conjugate the first argument, then dot the
        # flattened inputs.
        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
    def alterdot():
        # No-op: without _dotblas there is no optimized dot to switch to.
        pass
    def restoredot():
        # No-op counterpart of alterdot.
        pass
def tensordot(a, b, axes=2):
    """
    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors ``a`` and ``b``, sum the products of their
    elements over the axes specified by `axes`.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : variable type
        * integer_like scalar ``N``: sum over the last ``N`` dimensions
          of `a` and the first ``N`` dimensions of `b`.
        * array_like, shape (2,), both elements array_like: axes to be
          summed over, first sequence applying to ``a``, second to
          ``b``.

    Returns
    -------
    out : ndarray
        The tensor dot product of the inputs.

    Raises
    ------
    ValueError
        If the axes to be summed over do not have matching lengths.

    See Also
    --------
    numpy.dot

    Notes
    -----
    When there is more than one axis to sum over - and they are not the
    last (first) axes of ``a`` (``b``) - the argument ``axes`` should
    consist of two sequences of the same length, with the first axis to
    sum over given first in both sequences, the second axis second, and
    so forth.

    Implementation: the summation axes are permuted to the end of ``a``
    and the front of ``b``, both operands are reshaped to 2-D, and the
    result is a single matrix product.
    """
    try:
        iter(axes)
    except TypeError:
        # Scalar `axes` (was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit).
        axes_a = list(range(-axes, 0))
        axes_b = list(range(0, axes))
    else:
        axes_a, axes_b = axes
    # Normalize both axis specifications to lists and count them.
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1
    a, b = asarray(a), asarray(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)
    # Verify the summed axes match in length, normalizing negative axis
    # numbers along the way.
    equal = True
    if na != nb:
        equal = False
    else:
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        # Py3-compatible raise (was `raise ValueError, "..."`).
        raise ValueError("shape-mismatch for sum")
    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]
    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    # The heavy lifting is a single matrix product.
    res = dot(at, bt)
    return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements that roll beyond the last position are re-introduced at
    the first.

    Parameters
    ----------
    a : array_like
        Input array.
    shift : int
        The number of places by which elements are shifted.
    axis : int, optional
        The axis along which elements are shifted.  By default, the
        array is flattened before shifting, after which the original
        shape is restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    See Also
    --------
    rollaxis
    """
    a = asanyarray(a)
    if axis is None:
        n = a.size
    else:
        n = a.shape[axis]
    shift %= n
    # Index vector that maps the last `shift` positions to the front.
    idx = concatenate((arange(n - shift, n), arange(n - shift)))
    rolled = a.take(idx, axis)
    if axis is None:
        # take() with axis=None flattened the array; restore the shape.
        rolled = rolled.reshape(a.shape)
    return rolled
def rollaxis(a, axis, start=0):
    """
    Roll the specified axis backwards, until it lies in a given position.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        The axis to roll backwards.  The positions of the other axes do
        not change relative to one another.
    start : int, optional
        The axis is rolled until it lies before this position.

    Returns
    -------
    res : ndarray
        Output array (a transposed view of `a`, or `a` itself when the
        axis is already in place).

    Raises
    ------
    ValueError
        If `axis` or `start` is out of range for `a`.

    See Also
    --------
    roll
    """
    n = a.ndim
    # Normalize negative indices so the range checks below are uniform.
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    # Py3-compatible raises (were `raise ValueError, msg % ...`).
    if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n + 1):
        raise ValueError(msg % ('start', start, n + 1))
    if axis < start:
        # Removing `axis` shifts every later position down by one.
        start -= 1
    if axis == start:
        return a
    # list(range(...)) works identically on Python 2 and 3 (the bare
    # range object has no .remove/.insert on Python 3).
    axes = list(range(n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
    """Move `axis` of `a` to the front (compatibility shim for scipy)."""
    return rollaxis(a, axis, start=0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """
    Return the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector
    perpendicular to both `a` and `b`.  If `a` and `b` are arrays of
    vectors, the vectors are defined by the last axis of `a` and `b` by
    default, and these axes can have dimensions 2 or 3.  Where the
    dimension of either input is 2, the third component of that vector
    is assumed to be zero; when both are 2-dimensional, the z-component
    of the cross product is returned.

    Parameters
    ----------
    a : array_like
        Components of the first vector(s).
    b : array_like
        Components of the second vector(s).
    axisa : int, optional
        Axis of `a` that defines the vector(s).  By default, the last
        axis.
    axisb : int, optional
        Axis of `b` that defines the vector(s).  By default, the last
        axis.
    axisc : int, optional
        Axis of `c` containing the cross product vector(s).  By
        default, the last axis.
    axis : int, optional
        If defined, the axis of `a`, `b` and `c` that defines the
        vector(s) and cross product(s).  Overrides `axisa`, `axisb` and
        `axisc`.

    Returns
    -------
    c : ndarray
        Vector cross product(s).

    Raises
    ------
    ValueError
        When the dimension of the vector(s) in `a` and/or `b` does not
        equal 2 or 3.

    See Also
    --------
    inner, outer, ix_
    """
    if axis is not None:
        axisa = axisb = axisc = axis
    # Bring the vector components to the leading axis so that a[0],
    # a[1], a[2] index the x/y/z components.
    a = asarray(a).swapaxes(axisa, 0)
    b = asarray(b).swapaxes(axisb, 0)
    if a.shape[0] not in [2, 3] or b.shape[0] not in [2, 3]:
        raise ValueError("incompatible dimensions for cross product\n"
                         "(dimension must be 2 or 3)")
    if a.shape[0] == 2 and b.shape[0] == 2:
        # Both 2-D vectors: only the scalar z-component is defined.
        cp = a[0]*b[1] - a[1]*b[0]
        if cp.ndim == 0:
            return cp
        return cp.swapaxes(0, axisc)
    if a.shape[0] == 2:
        # a has an implicit zero third component.
        x = a[1]*b[2]
        y = -a[0]*b[2]
        z = a[0]*b[1] - a[1]*b[0]
    elif b.shape[0] == 2:
        # b has an implicit zero third component.
        x = -a[2]*b[1]
        y = a[2]*b[0]
        z = a[0]*b[1] - a[1]*b[0]
    else:
        # Full 3-dimensional cross product.
        x = a[1]*b[2] - a[2]*b[1]
        y = a[2]*b[0] - a[0]*b[2]
        z = a[0]*b[1] - a[1]*b[0]
    cp = array([x, y, z])
    if cp.ndim == 1:
        return cp
    return cp.swapaxes(0, axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
# dtypes whose repr() does not need an explicit dtype=... qualifier.
_typelessdata = [int_, float_, complex_]
# Where intc / longlong are subclasses of the Python int type on this
# platform, they print typelessly as well.
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array, e.g. ``'array([1, 2])'``.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span; newlines
        split the string appropriately after array elements.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8), which can be altered using
        `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False.

    Returns
    -------
    string : str
        The string representation of the array.

    See Also
    --------
    array_str, array2string, set_printoptions
    """
    if arr.size > 0 or arr.shape == (0,):
        body = array2string(arr, max_line_width, precision, suppress_small,
                            ', ', "array(")
    else:
        # Zero-size arrays with a shape other than (0,) show their shape.
        body = "[], shape=%s" % (repr(arr.shape),)
    if arr.__class__ is not ndarray:
        class_name = arr.__class__.__name__
    else:
        class_name = "array"
    if arr.dtype.type in _typelessdata and arr.size:
        # No dtype qualifier needed.
        return class_name + "(%s)" % body
    type_name = arr.dtype.name
    pad = ''
    if issubclass(arr.dtype.type, flexible):
        if arr.dtype.names:
            type_name = "%s" % str(arr.dtype)
        else:
            type_name = "'%s'" % str(arr.dtype)
        # Flexible dtypes can be long; put them on a new, aligned line.
        pad = '\n' + ' '*len("array(")
    return class_name + "(%s, %sdtype=%s)" % (body, pad, type_name)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data in the array is returned as a single string.  Unlike
    `array_repr`, the result contains no information about the array
    type or data type.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8), which can be altered using
        `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False.

    See Also
    --------
    array2string, array_repr, set_printoptions
    """
    # Elements separated by a single space, no prefix; scalar elements
    # are stringified with str().
    return array2string(a, max_line_width, precision, suppress_small,
                        ' ', "", str)
set_string_function = multiarray.set_string_function
# Register the default str() (mode 0) and repr() (mode 1)
# implementations for ndarrays.
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
# True on little-endian platforms such as x86.
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
    """
    Return an array representing the indices of a grid.

    The result has shape ``(len(dimensions),) + tuple(dimensions)``;
    subarray ``grid[k]`` holds the index values along axis ``k``, i.e.
    ``grid[k, i0, i1, ..., iN-1] = ik``.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : dtype, optional
        Data type of the result.

    Returns
    -------
    grid : ndarray
        The array of grid indices.

    See Also
    --------
    mgrid, meshgrid
    """
    dimensions = tuple(dimensions)
    ndim = len(dimensions)
    if ndim == 0:
        return array([], dtype=dtype)
    res = empty((ndim,) + dimensions, dtype=dtype)
    for axis, dim in enumerate(dimensions):
        # Index values along this axis, shaped so they broadcast across
        # every other axis.
        idx = arange(dim, dtype=dtype)
        idx.shape = (1,)*axis + (dim,) + (1,)*(ndim - axis - 1)
        pad = zeros(dimensions[:axis] + (1,) + dimensions[axis + 1:], dtype)
        # Broadcasting add writes the expanded index grid into res[axis].
        add(idx, pad, res[axis])
    return res
def fromfunction(function, shape, **kwargs):
    """
    Construct an array by executing a function over each coordinate.

    The resulting array therefore has a value ``fn(x, y, z)`` at
    coordinate ``(x, y, z)``.

    Parameters
    ----------
    function : callable
        Called with N array arguments, one per axis of `shape`, each
        giving the coordinate values along that axis; must be capable of
        operating on arrays.
    shape : (N,) tuple of ints
        Shape of the output array, which also determines the shape of
        the coordinate arrays passed to `function`.
    dtype : data-type, optional
        Data-type of the coordinate arrays passed to `function`
        (default float).

    Returns
    -------
    out : any
        The result of the call to `function`, returned unchanged, so its
        type and shape are completely determined by `function`.

    See Also
    --------
    indices, meshgrid

    Notes
    -----
    Keywords other than `shape` and `dtype` are passed to `function`.
    """
    dtype = kwargs.pop('dtype', float)
    coords = indices(shape, dtype=dtype)
    return function(*coords, **kwargs)
def isscalar(num):
    """
    Return True when `num` is a scalar type, False otherwise.

    A value counts as scalar if it is a NumPy generic or its exact type is
    listed in ``ScalarType``.

    Parameters
    ----------
    num : any
        Input argument, can be of any type and shape.

    Returns
    -------
    val : bool

    Examples
    --------
    >>> np.isscalar(3.1)
    True
    >>> np.isscalar([3.1])
    False
    """
    return isinstance(num, generic) or type(num) in ScalarType
# Lookup table mapping each hexadecimal digit to its 4-bit binary string.
# Used to expand the output of ``hex(num)`` digit by digit; the 'L' entry
# silently drops the trailing long-integer suffix that Python 2's ``hex()``
# appends for longs.
_lkup = {
    '0':'0000',
    '1':'0001',
    '2':'0010',
    '3':'0011',
    '4':'0100',
    '5':'0101',
    '6':'0110',
    '7':'0111',
    '8':'1000',
    '9':'1001',
    'a':'1010',
    'b':'1011',
    'c':'1100',
    'd':'1101',
    'e':'1110',
    'f':'1111',
    'A':'1010',
    'B':'1011',
    'C':'1100',
    'D':'1101',
    'E':'1110',
    'F':'1111',
    'L':''}
def binary_repr(num, width=None):
    """
    Return the binary representation of the input number as a string.

    For negative numbers, if width is not given, a minus sign is added to the
    front. If width is given, the two's complement of the number is
    returned, with respect to that width.

    Parameters
    ----------
    num : int
        Only an integer decimal number can be used.
    width : int, optional
        The length of the returned string if `num` is positive, the length of
        the two's complement if `num` is negative.

    Returns
    -------
    bin : str
        Binary representation of `num` or two's complement of `num`.

    See Also
    --------
    base_repr: Return a string representation of a number in the given base
               system.

    Examples
    --------
    >>> np.binary_repr(3)
    '11'
    >>> np.binary_repr(-3)
    '-11'
    >>> np.binary_repr(3, width=4)
    '0011'

    The two's complement is returned when the input number is negative and
    width is specified:

    >>> np.binary_repr(-3, width=4)
    '1101'
    """
    sign = ''
    if num < 0:
        if width is None:
            sign = '-'
            num = -num
        else:
            # Replace num with its two's complement relative to `width`.
            # NOTE: a width too small to hold `num` is not detected here.
            num = 2**width + num
    elif num == 0:
        return '0'*(width or 1)
    # format(num, 'b') yields the minimal binary digits directly; this
    # replaces the old hex()-plus-lookup-table expansion.
    bits = format(num, 'b')
    if width is not None:
        bits = bits.zfill(width)
    return sign + bits
def base_repr(number, base=2, padding=0):
    """
    Return a string representation of a number in the given base system.

    Parameters
    ----------
    number : scalar
        The value to convert. Only positive values are handled.
    base : int, optional
        Convert `number` to the `base` number system. The valid range is 2-36,
        the default value is 2.
    padding : int, optional
        Number of zeros padded on the left. Default is 0 (no padding).

    Returns
    -------
    out : str
        String representation of `number` in `base` system.

    See Also
    --------
    binary_repr : Faster version of `base_repr` for base 2 that also handles
                  negative numbers.

    Examples
    --------
    >>> np.base_repr(5)
    '101'
    >>> np.base_repr(6, 5)
    '11'
    >>> np.base_repr(7, base=5, padding=3)
    '00012'
    >>> np.base_repr(10, base=16)
    'A'
    >>> np.base_repr(32, base=16)
    '20'
    """
    if number < 0:
        raise ValueError("negative numbers not handled in base_repr")
    if base > 36:
        raise ValueError("bases greater than 36 not handled in base_repr")
    if base < 2:
        # Guard added: base 1 previously looped forever / base 0 crashed.
        raise ValueError("bases less than 2 not handled in base_repr")
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    res = padding*chars[0]
    if number == 0:
        return res + chars[0]
    # Repeated divmod replaces the old float-log digit extraction, which
    # relied on Python 2 `long` and was subject to floating-point error.
    digits = []
    num = int(number)
    while num > 0:
        num, rem = divmod(num, base)
        digits.append(chars[rem])
    return res + ''.join(reversed(digits))
try:
    from cPickle import load, loads
except ImportError:
    # Python 3: cPickle was folded into the plain pickle module.
    from pickle import load, loads
# Keep private aliases so the `load` wrapper below can shadow the name.
_cload = load
_file = open

def load(file):
    """
    Wrapper around cPickle.load which accepts either a file-like object or
    a filename.

    Note that the NumPy binary format is not based on pickle/cPickle anymore.
    For details on the preferred way of loading and saving files, see `load`
    and `save`.

    See Also
    --------
    load, save
    """
    if isinstance(file, type("")):
        file = _file(file, "rb")
    return _cload(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with ones.

    Please refer to the documentation for `zeros`.

    See Also
    --------
    zeros

    Examples
    --------
    >>> np.ones(5)
    array([ 1.,  1.,  1.,  1.,  1.])
    >>> np.ones((2, 2))
    array([[ 1.,  1.],
           [ 1.,  1.]])
    """
    out = empty(shape, dtype, order)
    try:
        # fill() with the fast C loops beats allocating zeros and adding 1.
        out.fill(1)
    except TypeError:
        # Structured dtype: fill with a matching nested tuple of ones.
        out.fill(_maketup(dtype, 1))
    return out
def identity(n, dtype=None):
    """
    Return the identity array: an `n` x `n` square array with ones on the
    main diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in the output.
    dtype : data-type, optional
        Data-type of the output.  Defaults to ``float``.

    Returns
    -------
    out : ndarray

    Examples
    --------
    >>> np.identity(3)
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    out = zeros((n, n), dtype=dtype)
    # Striding by n+1 through the flat view hits exactly the diagonal.
    out.flat[::n + 1] = 1
    return out
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    The relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.  If either array contains NaN, False is returned.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.

    Returns
    -------
    y : bool

    Notes
    -----
    The test is ``absolute(a - b) <= (atol + rtol * absolute(b))``, which is
    not symmetric in `a` and `b`.

    Examples
    --------
    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    True
    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
    False
    """
    x = array(a, copy=False)
    y = array(b, copy=False)

    def within_tol(u, v):
        return all(less_equal(absolute(u - v), atol + rtol * absolute(v)))

    xinf = isinf(x)
    # Infinities must appear at exactly the same positions in both arrays.
    if not all(xinf == isinf(y)):
        return False
    if not any(xinf):
        return within_tol(x, y)
    # Matching infinities must also agree in sign.
    if not all(x[xinf] == y[xinf]):
        return False
    finite = ~xinf
    return within_tol(x[finite], y[finite])
def array_equal(a1, a2):
    """
    True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    b : bool
        Returns True if the arrays are equal.

    See Also
    --------
    allclose: Returns True if two arrays are element-wise equal within a
              tolerance.
    array_equiv: Returns True if input arrays are shape consistent and all
                 elements equal.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    >>> np.array_equal([1, 2], [1, 4])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare except: anything not convertible to an
        # array is simply "not equal", but KeyboardInterrupt etc. must
        # still propagate.
        return False
    if a1.shape != a2.shape:
        return False
    return bool(logical_and.reduce(equal(a1, a2).ravel()))
def array_equiv(a1, a2):
    """
    Returns True if input arrays are shape consistent and all elements equal.

    Shape consistent means they are either the same shape, or one input array
    can be broadcasted to create the same shape as the other one.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    out : bool
        True if equivalent, False otherwise.

    Examples
    --------
    >>> np.array_equiv([1, 2], [1, 2])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare except: unconvertible inputs are "not
        # equivalent", but KeyboardInterrupt etc. must still propagate.
        return False
    try:
        return bool(logical_and.reduce(equal(a1, a2).ravel()))
    except ValueError:
        # Raised when the two shapes cannot be broadcast together.
        return False
# Map error-mode names to the numeric ERR_* constants used in the mask word.
_errdict = {"ignore": ERR_IGNORE,
            "warn": ERR_WARN,
            "raise": ERR_RAISE,
            "call": ERR_CALL,
            "print": ERR_PRINT,
            "log": ERR_LOG}

# Inverse mapping: numeric constant -> mode name (constants are distinct).
_errdict_rev = dict((value, name) for name, value in _errdict.items())
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Set treatment for every error category at once:

        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using the `seterrcall` function.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.

        The default is not to change the current behavior.
    divide, over, under, invalid : str, optional
        Per-category treatment for division by zero, overflow, underflow,
        and invalid operation, using the same choices as `all`.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall

    Examples
    --------
    >>> old_settings = np.seterr(all='ignore')
    >>> np.seterr(over='raise')
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
     'under': 'ignore'}
    """
    pyvals = umath.geterrobj()
    old = geterr()

    # Each unspecified category falls back to `all`, or failing that to the
    # previous setting for that category.
    modes = {'divide': divide, 'over': over,
             'under': under, 'invalid': invalid}
    for name in modes:
        if modes[name] is None:
            modes[name] = all or old[name]

    maskvalue = ((_errdict[modes['divide']] << SHIFT_DIVIDEBYZERO) +
                 (_errdict[modes['over']] << SHIFT_OVERFLOW) +
                 (_errdict[modes['under']] << SHIFT_UNDERFLOW) +
                 (_errdict[modes['invalid']] << SHIFT_INVALID))

    pyvals[1] = maskvalue
    umath.seterrobj(pyvals)
    return old
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log", "warn",
        "raise", and "call".

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.
    """
    maskvalue = umath.geterrobj()[1]
    # Each category occupies a 3-bit field in the mask word.
    mask = 7
    shifts = (('divide', SHIFT_DIVIDEBYZERO),
              ('over', SHIFT_OVERFLOW),
              ('under', SHIFT_UNDERFLOW),
              ('invalid', SHIFT_INVALID))
    res = {}
    for name, shift in shifts:
        res[name] = _errdict_rev[(maskvalue >> shift) & mask]
    return res
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer; must be a multiple of 16 in the range [16, 10e6].

    Returns
    -------
    old : int
        The previous buffer size.

    Raises
    ------
    ValueError
        If `size` is out of range or not a multiple of 16.
    """
    # Parenthesized raise form replaces the Python-2-only
    # ``raise ValueError, "..."`` statements (a syntax error on Python 3).
    if size > 10e6:
        raise ValueError("Buffer size, %s, is too big." % size)
    if size < 5:
        raise ValueError("Buffer size, %s, is too small." % size)
    if size % 16 != 0:
        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)

    pyvals = umath.geterrobj()
    old = getbufsize()
    pyvals[0] = size
    umath.seterrobj(pyvals)
    return old
def getbufsize():
    """Return the size of the buffer used in ufuncs.
    """
    # errobj layout is [bufsize, errmask, errcall]; slot 0 is the buffer
    # size (see setbufsize, which writes pyvals[0]).
    return umath.geterrobj()[0]
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages. The first
    is to set the error-handler to 'call', using `seterr`. Then, set
    the function to call using this function.

    The second is to set the error-handler to 'log', using `seterr`.
    Floating-point errors then trigger a call to the 'write' method of
    the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message ('log'-mode).

        The call function takes two arguments. The first is the
        type of error (one of "divide", "over", "under", or "invalid"),
        and the second is the status flag. The flag is a byte, whose
        least-significant bits indicate the status::

          [0 0 0 0 invalid over under invalid]

        In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.

        If an object is provided, its write method should take one argument,
        a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    Raises
    ------
    ValueError
        If `func` is neither callable, an object with a callable 'write'
        method, nor None.

    See Also
    --------
    seterr, geterr, geterrcall
    """
    # Accept None (reset), any callable, or an object with a callable
    # ``write`` method.  The parenthesized raise replaces the
    # Python-2-only ``raise ValueError, "..."`` statement.
    if func is not None and not callable(func):
        if not hasattr(func, 'write') or not callable(func.write):
            raise ValueError("Only callable can be used as callback")
    pyvals = umath.geterrobj()
    old = geterrcall()
    pyvals[2] = func
    umath.seterrobj(pyvals)
    return old
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', the function
    that is called or the log instance that is written to is returned by
    `geterrcall`. This function or log instance has been set with
    `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler. If no handler was set through `seterrcall`,
        ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterrcall()  # we did not yet set a handler, returns None
    >>> oldsettings = np.seterr(all='call')
    >>> def err_handler(type, flag):
    ...     print "Floating point error (%s), with flag %s" % (type, flag)
    >>> oldhandler = np.seterrcall(err_handler)
    >>> cur_handler = np.geterrcall()
    >>> cur_handler is err_handler
    True
    """
    # errobj layout is [bufsize, errmask, errcall]; slot 2 is the handler
    # (see seterrcall, which writes pyvals[2]).
    return umath.geterrobj()[2]
class _unspecified(object):
    # Private marker type; see the sentinel instance below.
    pass

# Sentinel used by `errstate` to distinguish "no 'call' keyword given"
# from an explicit ``call=None``.
_Unspecified = _unspecified()
class errstate(object):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Statements executed inside the context run with the error handling
    configured by the given keywords; on exit the previous configuration
    is restored via `seterr` (and `seterrcall` when ``call`` was given).

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Each keyword takes one of {'ignore', 'warn', 'raise', 'call',
        'print', 'log'} and sets the treatment for that error category.
        The special keyword ``call`` installs an error callback for the
        duration of the context.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions
    and treatment options, see `seterr`.

    Examples
    --------
    >>> olderr = np.seterr(all='ignore')
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    FloatingPointError: invalid value encountered in sqrt
    """
    # The doctests above are not run automatically: on Python < 2.6 they
    # would need ``from __future__ import with_statement``.

    def __init__(self, **kwargs):
        # ``call`` may legitimately be None, so a sentinel (not None)
        # marks "keyword not supplied".
        self.call = kwargs.pop('call', _Unspecified)
        self.kwargs = kwargs

    def __enter__(self):
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        # Restore unconditionally, even when the body raised.
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
def _setdef():
    # Reset the ufunc error object to its defaults: default buffer size,
    # default error mask, and no error callback.
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
    umath.seterrobj(defval)

# set the default values at import time
_setdef()
# Public aliases for the IEEE special values.
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN

# Canonical boolean scalar singletons.
False_ = bool_(False)
True_ = bool_(True)

# Re-export everything from fromnumeric into this module's namespace.
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| {
"repo_name": "numpy/datetime",
"path": "numpy/core/numeric.py",
"copies": "3",
"size": "69094",
"license": "bsd-3-clause",
"hash": -2095222668556706600,
"line_mean": 27.492371134,
"line_max": 83,
"alpha_frac": 0.5677193389,
"autogenerated": false,
"ratio": 3.5580616921571657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5625781031057165,
"avg_score": null,
"num_lines": null
} |
# Public exception types exported by this module.
__all__ = [
    'ExecutionError',
    'NoDispatch',
    'InvalidLibraryDefinition',
    'CustomSyntaxError'
]
# TODO: BlazeException base class ?
# for Numba
class ExecutionError(Exception):
    """Signals that a lazy or immediate expression could not be executed."""
# for the RTS
class NoDispatch(Exception):
def __init__(self, aterm):
self.aterm = aterm
def __str__(self):
return "No implementation for '%r'" % self.aterm
# for the RTS
class InvalidLibraryDefinition(Exception):
pass
#------------------------------------------------------------------------
# Generic Syntax Errors
#------------------------------------------------------------------------

# Template rendered by CustomSyntaxError.__str__.  Note that format() is
# handed a `filename` keyword the template does not reference — the header
# line is hard-coded to "(unknown)".
syntax_error = """
File (unknown), line {lineno}
{line}
{pointer}
{error}: {msg}
"""
class CustomSyntaxError(Exception):
    """
    Makes datashape parse errors look like Python SyntaxError.
    """

    def __init__(self, lexpos, filename, text, msg=None):
        """Record the error position and extract the offending line.

        Parameters
        ----------
        lexpos : int
            Offset of the error within `text`.
        filename : str
            Name of the file being parsed (kept for the message).
        text : str
            Full source text that was being parsed.
        msg : str, optional
            Error description; defaults to 'invalid syntax'.
        """
        self.lexpos = lexpos
        self.filename = filename
        self.text = text
        self.msg = msg or 'invalid syntax'
        self.lineno = text.count('\n', 0, lexpos) + 1
        # Get the extent of the line with the error
        linestart = text.rfind('\n', 0, lexpos)
        if linestart < 0:
            linestart = 0
        else:
            linestart = linestart + 1
        lineend = text.find('\n', lexpos)
        if lineend < 0:
            lineend = len(text)
        self.line = text[linestart:lineend]
        # Column of the error within `self.line` (0-based).
        self.col_offset = lexpos - linestart

    def __str__(self):
        # Fix: the caret pointer was computed and then ignored, with the
        # same expression duplicated inline below; use the local instead.
        pointer = ' ' * self.col_offset + '^'
        return syntax_error.format(
            filename=self.filename,
            lineno=self.lineno,
            line=self.line,
            pointer=pointer,
            msg=self.msg,
            error=self.__class__.__name__,
        )

    def __repr__(self):
        return str(self)
| {
"repo_name": "seibert/blaze-core",
"path": "blaze/error.py",
"copies": "1",
"size": "1982",
"license": "bsd-2-clause",
"hash": -5559749482462343000,
"line_mean": 25.4266666667,
"line_max": 73,
"alpha_frac": 0.515136226,
"autogenerated": false,
"ratio": 4.0531697341513295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5068305960151329,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Executor',
'PriorityExecutor',
]
import itertools
import logging
import os
import sys
from g1.bases.assertions import ASSERT
from . import actors
from . import futures
from . import queues
LOG = logging.getLogger(__name__)

# Python 3.4 implements PEP 442 for safe ``__del__``; Executor.__del__
# below relies on this guarantee.
ASSERT.greater_or_equal(sys.version_info, (3, 4))
class Executor:
    """Pool of actor threads that execute submitted calls from one queue."""

    # Global counter for generating default executor thread names.
    _COUNTER = itertools.count(1).__next__

    def __init__(
        self,
        max_executors=0,
        *,
        queue=None,
        name_prefix='',
        daemon=None,
    ):
        # In case ``__init__`` raises.
        self.queue = None
        if max_executors <= 0:
            # Use this because Executor is often used to parallelize I/O
            # instead of computationally-heavy tasks.
            # Fix: ``os.cpu_count()`` may return None, which would make
            # ``max`` raise a TypeError; fall back to one CPU.
            max_executors = max(os.cpu_count() or 1, 1) * 8
        if not name_prefix:
            names = (
                'executor-%02d' % self._COUNTER()
                for _ in range(max_executors)
            )
        else:
            names = (
                '%s-%02d' % (name_prefix, i) for i in range(max_executors)
            )
        self.queue = queue if queue is not None else queues.Queue()
        self.daemon = daemon
        self.stubs = tuple(
            actors.Stub(
                name=name,
                actor=actors.function_caller,
                queue=self.queue,
                daemon=self.daemon,
            ) for name in names
        )

    def __del__(self):
        """Close the queue on finalization, dropping any pending tasks."""
        # You have to check whether ``__init__`` raises.
        if self.queue is None:
            return
        num_items = len(self.queue.close(graceful=False))
        if num_items:
            LOG.warning('finalize: drop %d tasks', num_items)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, *_):
        """Shut down, gracefully only on a clean exit of a non-daemon pool."""
        # Or should I use the actual daemon property that actor thread
        # has?  (It could be inherited from the thread that creates this
        # executor.)
        graceful = not exc_type and not self.daemon
        self.shutdown(graceful)
        try:
            self.join(None if graceful else actors.NON_GRACE_PERIOD)
        except futures.Timeout:
            pass

    def submit(self, func, *args, **kwargs):
        """Enqueue ``func(*args, **kwargs)``; return a future for its result."""
        future = futures.Future()
        call = actors.MethodCall(
            method=func, args=args, kwargs=kwargs, future=future
        )
        self.queue.put(call)
        return future

    def shutdown(self, graceful=True):
        """Close the queue; log and return any tasks that were dropped."""
        items = self.queue.close(graceful)
        if items:
            LOG.warning('drop %d tasks', len(items))
        return items

    def join(self, timeout=None):
        """Wait for all actor stubs to exit; raise ``Timeout`` otherwise."""
        stubs = {stub.future: stub for stub in self.stubs}
        for f in futures.as_completed(stubs, timeout):
            stub = stubs.pop(f)
            exc = f.get_exception()
            if exc:
                LOG.error('executor crash: %r', stub, exc_info=exc)
        if stubs:
            LOG.warning('not join %d executors', len(stubs))
            raise futures.Timeout
class PriorityExecutor(Executor):
    """PriorityExecutor.

    This class is a sub-class of ``Executor`` sorely for inheriting its
    implementation, not its interface.  You should not treat this as a
    sub-type of ``Executor`` (thus Liskov Substitution Principle is not
    always applied to this class).  However, most of the time this class
    should be compatible with ``Executor``.
    """

    def __init__(self, *args, **kwargs):
        supplied_queue = kwargs.get('queue')
        default_priority = kwargs.pop('default_priority', None)
        # Exactly one of ``queue`` / ``default_priority`` must be given.
        ASSERT.xor(supplied_queue is None, default_priority is None)
        if supplied_queue is None:
            kwargs['queue'] = ExecutorPriorityQueue(default_priority)
        super().__init__(*args, **kwargs)

    def submit_with_priority(self, priority, func, *args, **kwargs):
        """Like ``submit`` but with an explicit priority for the call."""
        result = futures.Future()
        self.queue.put_with_priority(
            priority,
            actors.MethodCall(
                method=func, args=args, kwargs=kwargs, future=result
            ),
        )
        return result
class ExecutorPriorityQueue:
    """Priority queue specifically for ``PriorityExecutor``.

    Wraps a priority queue behind the queue-like interface that the base
    ``Executor`` and its actors expect.
    """

    class Item:
        """Pair a payload with its priority; ordered by priority alone."""

        __slots__ = ('priority', 'item')

        def __init__(self, priority, item):
            self.priority = priority
            self.item = item

        def __lt__(self, other):
            # Payloads are never compared, so they need not be orderable.
            return self.priority < other.priority

    def __init__(self, default_priority, queue=None):
        self._default_priority = default_priority
        if queue is None:
            queue = queues.PriorityQueue()
        self._queue = queue

    def close(self, graceful=True):
        return self._queue.close(graceful=graceful)

    def get(self, timeout=None):
        # Unwrap: callers receive the payload, not the Item wrapper.
        return self._queue.get(timeout=timeout).item

    def put(self, item, timeout=None):
        # Plain puts (from the base Executor) use the default priority.
        return self.put_with_priority(
            self._default_priority, item, timeout=timeout
        )

    def put_with_priority(self, priority, item, timeout=None):
        return self._queue.put(self.Item(priority, item), timeout=timeout)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/threads/g1/threads/executors.py",
"copies": "1",
"size": "5170",
"license": "mit",
"hash": 5319031289867785000,
"line_mean": 29.4117647059,
"line_max": 76,
"alpha_frac": 0.5839458414,
"autogenerated": false,
"ratio": 4.064465408805032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5148411250205032,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = [
    'ExtractTopography',
]

# Category label (presumably consumed by the PVGeo/ParaView plugin UI —
# confirm against the plugin loader).
__displayname__ = 'Subsetting'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista as pv
from .. import interface
from ..base import FilterBase
# NOTE: internal import - from scipy.spatial import cKDTree
###############################################################################
class ExtractTopography(FilterBase):
"""This filter takes two inputs: any mesh dataset and a set of points for
a topography source. This will add a boolean data array to the cell data of
the input grid on whether that cell should be active (under topographic
layer). A user can also choose to directly extract the data rather than
appending a boolean scalar array via the ``remove`` argument.
Args:
op (str, int, or callable): The operation as a string key, int
index, or callable method
tolerance (float): buffer around the topography surface to include as
part of the decision boundary
offset (float): static value to shift the reference topography surface
ivert (bool): optional to invert the extraction.
remove (bool): Optional parameter to apply a thresholding filter and
return a ``vtkUnstructuredGrid`` object with only the extracted
cells. The ``remove`` option is only available in Python
environments (not available in ParaView). The ``remove`` flag must
be set at the time of instantiation of this algorithm.
This does not actually update the algorithm's output data object
but applies a `PyVista` threshold filter to pass a new data object
after calling ``apply``.
Note:
This currenlty ignores time varying inputs. We can implement time
variance but need to think about how we would like to do that. Should
the topography surface be static and the volumetric data have time
variance?
"""
__displayname__ = 'Extract Topography'
__category__ = 'filter'
def __init__(
self, op='underneath', tolerance=0.001, offset=0.0, invert=False, remove=False
):
FilterBase.__init__(self, nInputPorts=2, inputType='vtkDataSet', nOutputPorts=1)
self._tolerance = tolerance
self._offset = offset
self._invert = invert
self._remove = remove
self._operation = self._underneath
self.set_operation(op)
# CRITICAL for multiple input ports
def FillInputPortInformation(self, port, info):
"""This simply makes sure the user selects the correct inputs"""
typ = 'vtkDataSet'
if port == 1:
typ = 'vtkPointSet' # Make sure topography is some sort of point set
info.Set(self.INPUT_REQUIRED_DATA_TYPE(), typ)
return 1
# THIS IS CRUCIAL to preserve data type through filter
    # THIS IS CRUCIAL to preserve data type through filter
    def RequestDataObject(self, request, inInfo, outInfo):
        """Constructs the output data object based on the input data object"""
        # Mirror the concrete class of the port-0 input so the output is the
        # same dataset type (e.g. vtkImageData in, vtkImageData out).
        self.OutputType = self.GetInputData(inInfo, 0, 0).GetClassName()
        self.FillOutputPortInformation(0, outInfo.GetInformationObject(0))
        return 1
#### Extraction Methods ####
@staticmethod
def _query(topo_points, data_points):
"""Querrys the data points for their closest point on the topography
surface"""
try:
# sklearn's KDTree is faster: use it if available
from sklearn.neighbors import KDTree as Tree
except ImportError:
from scipy.spatial import cKDTree as Tree
tree = Tree(topo_points)
i = tree.query(data_points)[1].ravel()
return topo_points[i]
@staticmethod
def _underneath(topo_points, data_points, tolerance):
"""Extract cells underneath the topography surface"""
comp = ExtractTopography._query(topo_points, data_points)
return np.array(data_points[:, 2] < (comp[:, 2] - tolerance), dtype=int)
@staticmethod
def _intersection(topo_points, data_points, tolerance):
"""Extract cells intersecting the topography surface"""
comp = ExtractTopography._query(topo_points, data_points)
return np.array(np.abs((data_points[:, 2] - comp[:, 2])) < tolerance, dtype=int)
@staticmethod
def get_operations():
"""Returns the extraction operation methods as callable objects in a
dictionary
"""
ops = dict(
underneath=ExtractTopography._underneath,
intersection=ExtractTopography._intersection,
)
return ops
@staticmethod
def get_operation_names():
"""Gets a list of the extraction operation keys
Return:
list(str): the keys for getting the operations
"""
ops = ExtractTopography.get_operations()
return list(ops.keys())
@staticmethod
def get_operation(idx):
"""Gets a extraction operation based on an index in the keys
Return:
callable: the operation method
"""
if isinstance(idx, str):
return ExtractTopography.get_operations()[idx]
n = ExtractTopography.get_operation_names()[idx]
return ExtractTopography.get_operations()[n]
#### Pipeline Methods ####
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
igrid = self.GetInputData(inInfo, 0, 0) # Port 0: grid
topo = self.GetInputData(inInfo, 1, 0) # Port 1: topography
grid = self.GetOutputData(outInfo, 0)
grid.DeepCopy(igrid)
# Perfrom task
ncells = igrid.GetNumberOfCells()
active = np.zeros((ncells), dtype=int)
# Now iterate through the cells in the grid and test if they are beneath the topography
wtopo = dsa.WrapDataObject(topo)
topo_points = np.array(wtopo.Points) # mak sure we do not edit the input
# shift the topography points for the tree
topo_points[:, 2] = topo_points[:, 2] + self._offset
filt = vtk.vtkCellCenters()
filt.SetInputDataObject(igrid)
filt.Update()
data_points = dsa.WrapDataObject(filt.GetOutput(0)).Points
active = self._operation(topo_points, data_points, self._tolerance)
if self._invert:
# NOTE: assumes the given operation produces zeros and ones only
active = 1 - active
# Now add cell data to output
active = interface.convert_array(active, name='Extracted')
grid.GetCellData().AddArray(active)
return 1
def apply(self, data, points):
"""Run the algorithm on the input data using the topography points"""
self.SetInputDataObject(0, data)
self.SetInputDataObject(1, points)
self.Update()
output = pv.wrap(self.GetOutput())
if self._remove:
# NOTE: Assumes the given operation produces zeros and ones only
# Also, this does not update the algorithm's output.
# This only sends a new thresholded dataset to the user.
return output.threshold(0.5, scalars='Extracted')
return output
#### Setters/Getters ####
def set_tolerance(self, tol):
"""Set the tolerance threshold for the querry"""
if self._tolerance != tol:
self._tolerance = tol
self.Modified()
    def get_tolerance(self):
        """Return the tolerance threshold used by the query."""
        return self._tolerance
def set_offset(self, offset):
"""Sets how far off (in Z dir) to slice the data"""
if self._offset != offset:
self._offset = offset
self.Modified()
def set_invert(self, flag):
"""Sets the boolean flag on whether to invert the extraction."""
if self._invert != flag:
self._invert = flag
self.Modified()
def set_operation(self, op):
"""Set the type of extraction to perform.
Args:
op (str, int, or callable): The operation as a string key, int
index, or callable method
Note:
This can accept a callable method to set a custom operation as long
as its signature is ``<callable>(self, topo_points, data_points)`` and it
strictly produces an integer array of zeros and ones.
"""
if isinstance(op, str):
op = ExtractTopography.get_operations()[op]
elif isinstance(op, int):
op = ExtractTopography.get_operation(op)
if self._operation != op:
self._operation = op
self.Modified()
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/grids/subset.py",
"copies": "1",
"size": "8722",
"license": "bsd-3-clause",
"hash": 2149973573444274700,
"line_mean": 35.6470588235,
"line_max": 95,
"alpha_frac": 0.6224489796,
"autogenerated": false,
"ratio": 4.456821665815023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007926096121706344,
"num_lines": 238
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'Field',
    'ForeignKey',
    'NumberField',
    'StringField',
    'ObjectField',
    'DateTimeField',
    'ListField'
]
from datetime import datetime
import re
class Field(object):
    """Descriptor-style field stored in a dict-like owner object.

    ``set_key`` must be called (normally by the model metaclass) before the
    field is used; the storage key is the alias, when given, else the
    attribute name.
    """

    def __init__(self, required=False, default=None, alias='', extra=lambda x: True):
        self.alias = alias          # optional storage-key override
        self.extra = extra          # additional validation callback
        self.required = required
        self.default = default

    def init(self, obj):
        # A callable default is invoked so every object gets a fresh value.
        value = self.default
        obj[self.key] = value() if callable(value) else value

    def __get__(self, obj, owner):
        return obj.get(self.key, self.default)

    def __set__(self, obj, value):
        obj[self.key] = value

    def __delete__(self, obj):
        pass

    def set_key(self, key):
        self.key = self.alias if self.alias else key

    def validate(self, obj):
        value = obj.get(self.key, None)
        if self.required and not value:
            return False
        return self.extra(value)
class OneToManyRelation(object):
    """Read-only reverse side of a ForeignKey.

    Reading the attribute queries ``connected_class`` for every record whose
    ``key`` field points at the owning object's ``_id``.
    """

    def __init__(self, connected_class, key):
        self.connected_class = connected_class
        self.key = key

    def __get__(self, obj, owner):
        query = {self.key: obj['_id']}
        return self.connected_class.Q.filter(query).fetch()

    def __set__(self, obj, value):
        pass  # the relation is read-only

    def __delete__(self, obj):
        pass
class ForeignKey(Field):
    """Field storing the id of a related model instance.

    Reading resolves the stored id through ``related_model.Q.get``; writing
    accepts any object exposing an ``id`` attribute.
    """

    def __init__(self, related_model, relation_name='', **kwargs):
        super(ForeignKey, self).__init__(**kwargs)
        self.related_model = related_model
        self.relation_name = relation_name

    def __get__(self, obj, owner):
        related_id = obj.get(self.key, 0)
        if not related_id:
            return None
        return self.related_model.Q.get(related_id)

    def __set__(self, obj, value):
        # Silently ignore values that do not expose an ``id``.
        if hasattr(value, 'id'):
            obj[self.key] = value.id

    def __delete__(self, obj):
        pass

    def install_relation(self, owner_class, class_name):
        # Expose the reverse relation on the related model,
        # e.g. ``user.post_list``.
        name = self.relation_name or ('%s_list' % class_name.lower())
        setattr(self.related_model, name, OneToManyRelation(owner_class, self.key))
class NumberField(Field):
    """Numeric field with optional rounding and min/max constraints.

    Args:
        default: initial value written by ``init``.
        type: numeric constructor applied on every get/set (e.g. int, float).
        precision: if non-zero, round to this many decimals on read.
        constraints: (min, max) inclusive bounds; ``None`` disables a bound.
    """

    def __init__(self, default=0, type=int, precision=0, constraints=(None, None), **kwargs):
        super(NumberField, self).__init__(**kwargs)
        self.type = type
        self.precision = precision
        self.constraints = constraints
        self.default = default

    def __get__(self, obj, owner):
        value = super(NumberField, self).__get__(obj, owner)
        if self.precision:
            value = round(value, self.precision)
        return self.type(value)

    def __set__(self, obj, value):
        obj[self.key] = self.type(value)

    def validate(self, obj):
        if super(NumberField, self).validate(obj):
            # NOTE(review): assumes the key is present (init() ran); a missing
            # key raises KeyError here -- confirm that is intended.
            value = obj[self.key]
            _min, _max = self.constraints
            # Idiom fix: compare against None with ``is``, not ``==``.
            return (((_min is None) or (value >= _min))
                    and ((_max is None) or (value <= _max)))
        else:
            return False
class StringField(Field):
    """String field validated against a regex and length constraints.

    An empty ``pattern`` compiles to a match-anything regex, so regex
    validation is effectively a no-op by default.
    """

    def __init__(self, pattern='', len_constraints=(None, None), **kwargs):
        super(StringField, self).__init__(**kwargs)
        self.pattern = re.compile(pattern)
        self.constraints = len_constraints

    def validate(self, obj):
        if super(StringField, self).validate(obj):
            value = obj[self.key]
            # BUG FIX: removed leftover Python-2 debug ``print`` statements
            # ('pre RE match' / 'RE match') that polluted stdout on every
            # validation and were a syntax error under Python 3.
            if self.pattern.match(value) is None:
                return False
            _min, _max = self.constraints
            return (((_min is None) or (len(value) >= _min))
                    and ((_max is None) or (len(value) <= _max)))
        else:
            return False
class ObjectField(Field):
    """Field holding a dict value."""
    def __init__(self, default={}, **kwargs):
        super(ObjectField, self).__init__(**kwargs)
        # Copy the default so the mutable argument object is never shared
        # or mutated across fields.
        self.default = dict(default)
class ListField(Field):
    """Field holding a list value."""
    def __init__(self, default=[], **kwargs):
        # BUG FIX: the original called ``super(ObjectField, self).__init__``,
        # which raises TypeError because ListField is not an ObjectField
        # subclass; the super() call must name this class.
        super(ListField, self).__init__(**kwargs)
        # Copy the default so the mutable argument is never shared.
        self.default = list(default)
class DateTimeField(Field):
    """Datetime field accepting datetime objects, POSIX timestamps (int),
    or ISO-ish date strings.
    """

    # Accepted string representations, tried in order.
    _FORMATS = (
        '%Y-%m-%dT%H:%M:%S.%f',  # ISO 8601
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%dT%H:%M:%S',  # ISO 8601, no fractional seconds
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d',
    )

    def __init__(self, default=datetime.now, constraints=(None, None), **kwargs):
        super(DateTimeField, self).__init__(**kwargs)
        self.constraints = constraints
        self.default = default

    def __set__(self, obj, value):
        if isinstance(value, int):
            obj[self.key] = datetime.fromtimestamp(value)
        elif isinstance(value, str):
            # NOTE(review): ``str`` misses ``unicode`` under Python 2 --
            # confirm which runtime this targets.
            parsed = None
            for frmt in self._FORMATS:
                try:
                    parsed = datetime.strptime(value, frmt)
                    break  # first matching format wins; no need to keep trying
                except ValueError:  # was a bare ``except``: far too broad
                    pass
            if not parsed:
                raise Exception('Unknown datetime format DateTimeField.set()')
            obj[self.key] = parsed
        elif isinstance(value, datetime) or value is None:
            obj[self.key] = value
        else:
            raise Exception('Invalid type DateTimeField.set()')

    def validate(self, obj):
        if super(DateTimeField, self).validate(obj):
            value = obj[self.key]
            _min, _max = self.constraints
            return (((_min is None) or (value >= _min))
                    and ((_max is None) or (value <= _max)))
        else:
            return False
| {
"repo_name": "veeloox/ramen",
"path": "ramen/db/fields.py",
"copies": "1",
"size": "5422",
"license": "apache-2.0",
"hash": 174904560249059070,
"line_mean": 26.3838383838,
"line_max": 104,
"alpha_frac": 0.5440796754,
"autogenerated": false,
"ratio": 3.9432727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49873524026727273,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'FileAdapter',
    'FutureAdapter',
    'SocketAdapter',
]
import io
import logging
import os
import socket
import ssl
import weakref
from g1.asyncs.kernels import contexts
from g1.asyncs.kernels import traps
from g1.bases import classes
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
class AdapterBase:
    """Base for adapters wrapping a target object behind a field whitelist.

    Attribute access falls through to the target only for names listed in
    ``fields``; every other name raises AttributeError.  The name 'target'
    is reserved for retrieving the wrapped object itself.
    """
    def __init__(self, target, fields):
        self.__target = target
        # 'target' must not appear in the whitelist: it is handled specially
        # in __getattr__ below.
        self.__fields = ASSERT.not_contains(fields, 'target')
    __repr__ = classes.make_repr('{self._AdapterBase__target!r}')
    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails.
        if name == 'target':
            return self.__target
        if name in self.__fields:
            return getattr(self.__target, name)
        raise AttributeError('disallow accessing field: %s' % name)
    def disown(self):
        # Give up ownership; the caller becomes responsible for the target's
        # lifetime (e.g. closing it).
        target, self.__target = self.__target, None
        return target
class FileAdapter(AdapterBase):
    """File-like adapter.
    NOTE: When adapting a file-like object returned by SSL socket
    makefile, be careful NOT to use read/readinto (even if you provide
    the correct buffer size). For reasons that I have not figured out
    yet, the BufferedReader returned by makefile can cause SSL socket to
    over-recv, causing it to hang indefinitely. For now, the
    solution is to use readinto1.
    NOTE: We do not adapt read1 because in non-blocking mode, read1
    returns b'' both when EOF or when no data is available.
    """
    # Attribute names transparently proxied to the wrapped file object.
    PROXIED_FIELDS = frozenset([
        'closed',
        'detach',
        'fileno',
    ])
    def __init__(self, file):
        super().__init__(file, self.PROXIED_FIELDS)
        self.__file = file
        # Switch the fd to non-blocking and register it with the kernel so
        # poll_read/poll_write traps below can wait on it.
        os.set_blocking(self.__file.fileno(), False)
        kernel = contexts.get_kernel()
        kernel.notify_open(self.__file.fileno())
        # Keep a weak reference to kernel because we could call
        # `notify_close` in another thread.
        self.__kernel = weakref.ref(kernel)
    def __enter__(self):
        return self
    def __exit__(self, *_):
        self.close()
    def disown(self):
        # Release ownership of the file: close() becomes a no-op afterwards.
        super().disown()
        file, self.__file = self.__file, None
        return file
    async def __call_read(self, func, args):
        # Retry ``func`` until it yields data; None (would-block) and
        # SSLWantReadError both mean "poll the fd and try again".
        while True:
            try:
                ret = func(*args)
                if ret is not None:
                    return ret
            except ssl.SSLWantReadError:
                pass
            await traps.poll_read(self.__file.fileno())
    async def read(self, size=-1):
        """Async read; blocks the task, not the thread."""
        return await self.__call_read(self.__file.read, (size, ))
    async def readinto(self, buffer):
        """Async readinto; see the class NOTE about SSL-backed files."""
        return await self.__call_read(self.__file.readinto, (buffer, ))
    async def readinto1(self, buffer):
        """Async readinto1; the safe choice for SSL makefile objects."""
        return await self.__call_read(self.__file.readinto1, (buffer, ))
    async def write(self, data):
        """Async write; returns the number of bytes written (possibly partial)."""
        while True:
            try:
                num_written = self.__file.write(data)
                if num_written is not None:
                    return num_written
            except (BlockingIOError, InterruptedError) as exc:
                # A partial write still counts as progress; report it.
                if exc.characters_written > 0:
                    return exc.characters_written
            await traps.poll_write(self.__file.fileno())
    async def flush(self):
        """Async flush; retries until the buffered data is drained."""
        while True:
            try:
                return self.__file.flush()
            except (BlockingIOError, InterruptedError):
                await traps.poll_write(self.__file.fileno())
    def close(self):
        if self.__file is None:
            return  # Disowned.
        if self.__file.raw is None:
            return  # Detached.
        if self.__file.closed:
            return
        # Unregister from the kernel before closing the fd.
        kernel = self.__kernel()
        if kernel is not None:
            kernel.notify_close(self.__file.fileno())
        try:
            self.__file.close()
        except (BlockingIOError, InterruptedError) as exc:
            # Sadly, there is nothing we can do here since the file has
            # been closed and not-yet-flushed-out data are lost. If you
            # want absolutely no data loss, you should call ``flush``
            # before call ``close``.
            LOG.warning('close error: %r', self, exc_info=exc)
class SocketAdapter(AdapterBase):
    """Non-blocking socket adapter: every blocking socket call becomes an
    async method that polls the fd through the kernel between retries.
    """
    # Attribute names transparently proxied to the wrapped socket.
    PROXIED_FIELDS = frozenset([
        'bind',
        'detach',
        'fileno',
        'getsockname',
        'getsockopt',
        'listen',
        'setblocking',
        'setsockopt',
    ])
    # Exception sets meaning "would block": retry after the fd polls ready.
    READ_BLOCKED = (BlockingIOError, InterruptedError, ssl.SSLWantReadError)
    WRITE_BLOCKED = (BlockingIOError, InterruptedError, ssl.SSLWantWriteError)
    def __init__(self, sock):
        super().__init__(sock, self.PROXIED_FIELDS)
        self.__sock = sock
        # Non-blocking mode plus kernel registration enables the poll traps.
        self.__sock.setblocking(False)
        kernel = contexts.get_kernel()
        kernel.notify_open(self.__sock.fileno())
        # Keep a weak reference to kernel because we could call
        # `notify_close` in another thread.
        self.__kernel = weakref.ref(kernel)
    def __enter__(self):
        return self
    def __exit__(self, *_):
        self.close()
    def disown(self):
        # Release ownership of the socket: close() becomes a no-op afterwards.
        super().disown()
        sock, self.__sock = self.__sock, None
        return sock
    async def accept(self):
        """Accept a connection; the new socket is wrapped in the same adapter type."""
        while True:
            try:
                sock, addr = self.__sock.accept()
                return type(self)(sock), addr
            except self.READ_BLOCKED:
                await traps.poll_read(self.__sock.fileno())
    async def connect(self, address):
        # ``connect`` may raise ``BlockingIOError`` and we should wait
        # until it becomes writeable (but in general, non-blocking
        # connect is weird).
        try:
            self.__sock.connect(address)
        except self.WRITE_BLOCKED:
            await traps.poll_write(self.__sock.fileno())
            # The connect outcome is reported through SO_ERROR, not a raise.
            errno = self.__sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if errno:
                raise OSError(errno, 'err in connect(%r): %r' % (address, self))
        # SSL sockets defer the handshake; run it now if configured to.
        if getattr(self.__sock, 'do_handshake_on_connect', False):
            await self.do_handshake()
    async def do_handshake(self):
        """Drive a non-blocking TLS handshake to completion."""
        while True:
            try:
                return self.__sock.do_handshake(block=False)
            except self.READ_BLOCKED:
                await traps.poll_read(self.__sock.fileno())
            except self.WRITE_BLOCKED:
                await traps.poll_write(self.__sock.fileno())
    async def recv(self, buffersize, flags=0):
        """Async recv."""
        while True:
            try:
                return self.__sock.recv(buffersize, flags)
            except self.READ_BLOCKED:
                await traps.poll_read(self.__sock.fileno())
    async def recv_into(self, buffer, nbytes=0, flags=0):
        """Async recv_into."""
        while True:
            try:
                return self.__sock.recv_into(buffer, nbytes, flags)
            except self.READ_BLOCKED:
                await traps.poll_read(self.__sock.fileno())
    async def send(self, data, flags=0):
        """Async send; may send fewer bytes than given (returns the count)."""
        while True:
            try:
                return self.__sock.send(data, flags)
            except self.WRITE_BLOCKED:
                await traps.poll_write(self.__sock.fileno())
    async def sendmsg(self, buffers, *args):
        """Async sendmsg."""
        while True:
            try:
                return self.__sock.sendmsg(buffers, *args)
            except self.WRITE_BLOCKED:
                await traps.poll_write(self.__sock.fileno())
    async def sendfile(self, file, offset=0, count=None):
        """Re-implementation of stdlib's socket.sendfile.
        Unlike stdlib's sendfile, this raises TypeError if the file
        argument does not look like a regular file object, rather than
        falling back to send.
        We re-implement stdlib's socket.sendfile because it does not
        support non-blocking sockets.
        """
        self.__sock._check_sendfile_params(file, offset, count)
        # Check whether `file` is a regular file.
        try:
            in_fd = file.fileno()
        except (AttributeError, io.UnsupportedOperation):
            raise TypeError('expect a regular file')
        try:
            file_size = os.fstat(in_fd).st_size
        except OSError:
            raise TypeError('expect a regular file')
        if file_size == 0:
            return 0
        out_fd = self.__sock.fileno()
        num_to_send = file_size if count is None else count
        num_sent_total = 0
        try:
            while num_to_send > 0:
                try:
                    # Zero-copy transfer from in_fd to the socket.
                    num_sent = os.sendfile(out_fd, in_fd, offset, num_to_send)
                except self.WRITE_BLOCKED:
                    await traps.poll_write(out_fd)
                    continue
                except BrokenPipeError:
                    # Avoid BrokenPipeError caught by `except OSError`.
                    raise
                except OSError:
                    if num_sent_total == 0:
                        # Most likely `file` is not a regular file.
                        raise TypeError('expect a regular file')
                    raise
                if num_sent == 0:
                    break  # EOF of in_fd.
                offset += num_sent
                num_sent_total += num_sent
                num_to_send -= num_sent
            return num_sent_total
        finally:
            # Mirror stdlib behavior: leave the file position where we stopped.
            if num_sent_total > 0 and hasattr(file, 'seek'):
                file.seek(offset)
    def close(self):
        if self.__sock is None:
            return  # Disowned.
        fd = self.__sock.fileno()
        if fd >= 0:
            # Unregister from the kernel before closing the fd.
            kernel = self.__kernel()
            if kernel is not None:
                kernel.notify_close(fd)
        # I assume that ``socket.close`` does not flush out data, and
        # thus never raises ``BlockingIOError``, etc.
        self.__sock.close()
class FutureAdapter(AdapterBase):
    """Adapter turning a thread-style future into an awaitable one."""
    # Attribute names transparently proxied to the wrapped future.
    PROXIED_FIELDS = frozenset([
        'is_completed',
        'add_callback',
        'catching_exception',
        'set_result',
        'set_exception',
        'set_finalizer',
    ])
    def __init__(self, future):
        super().__init__(future, self.PROXIED_FIELDS)
        self.__future = future
    async def join(self):
        """Block the current task until the future completes."""
        if self.__future.is_completed():
            return
        # Since the callback could be fired from another thread, which
        # may not have the right kernel object in its context, we should
        # get the right kernel object from the context here, and pass it
        # to the callback function.
        kernel = contexts.get_kernel()
        callback = lambda: kernel.unblock(self.__future)
        await traps.block(
            self.__future,
            lambda: self.__future.
            add_callback(lambda _: kernel.post_callback(callback)),
        )
        ASSERT.true(self.__future.is_completed())
    async def get_result(self):
        # timeout=0 is safe: join() guarantees completion.
        await self.join()
        return self.__future.get_result(timeout=0)
    async def get_exception(self):
        # timeout=0 is safe: join() guarantees completion.
        await self.join()
        return self.__future.get_exception(timeout=0)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/bases/g1/asyncs/bases/adapters.py",
"copies": "1",
"size": "10995",
"license": "mit",
"hash": -3130149115248846000,
"line_mean": 31.5295857988,
"line_max": 78,
"alpha_frac": 0.5601637108,
"autogenerated": false,
"ratio": 4.215874233128834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5276037943928834,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'FileLock',
]
import errno
import fcntl
import os
from garage import scripts
from garage.assertions import ASSERT
class FileLock:
    """Non-reentrant, non-blocking file lock."""

    def __init__(self, lock_file_path):
        self._lock_file_path = lock_file_path
        self._fd = None  # Open descriptor while the lock is held.

    @property
    def locked(self):
        return self._fd is not None

    def _ensure_lock_file(self):
        # Create the lock file (and its parent directory) on first use.
        if self._lock_file_path.exists():
            return
        with scripts.using_sudo():
            scripts.mkdir(self._lock_file_path.parent)
            scripts.execute(['touch', self._lock_file_path])

    def acquire(self):
        """Try to take the lock; return True on success, False if it is held."""
        if scripts.is_dry_run():
            return True
        ASSERT.false(self.locked)
        self._ensure_lock_file()
        fd = os.open(str(self._lock_file_path), os.O_RDONLY)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except BlockingIOError as exc:
            if exc.errno != errno.EWOULDBLOCK:
                raise
            return False
        else:
            # Hand the descriptor over to self so the finally-clause below
            # does not close it.
            fd, self._fd = None, fd
            return True
        finally:
            if fd is not None:
                os.close(fd)

    def release(self):
        """Unlock and close the descriptor taken by ``acquire``."""
        if scripts.is_dry_run():
            return
        ASSERT.true(self.locked)
        held, self._fd = self._fd, None
        try:
            fcntl.flock(held, fcntl.LOCK_UN)
        finally:
            os.close(held)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/onboard/locks.py",
"copies": "1",
"size": "1374",
"license": "mit",
"hash": 2040652302834215200,
"line_mean": 22.2881355932,
"line_max": 64,
"alpha_frac": 0.5356622999,
"autogenerated": false,
"ratio": 3.7747252747252746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48103875746252744,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'FileLock',
    'NotLocked',
    'acquire_exclusive',
    'acquiring_exclusive',
    'acquiring_shared',
    'try_acquire_exclusive',
    'is_locked_by_other',
]
import contextlib
import errno
import fcntl
import os
from g1.bases.assertions import ASSERT
class NotLocked(Exception):
    """Raised when the file lock cannot be acquired without blocking."""
class FileLock:
    """Advisory, flock(2)-based lock on an existing file path."""
    def __init__(self, path, *, close_on_exec=True):
        # Read-only is sufficient: flock does not need write access.
        fd = os.open(path, os.O_RDONLY)
        try:
            # Actually, CPython's os.open always sets O_CLOEXEC.
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            if close_on_exec:
                new_flags = flags | fcntl.FD_CLOEXEC
            else:
                new_flags = flags & ~fcntl.FD_CLOEXEC
            if new_flags != flags:
                fcntl.fcntl(fd, fcntl.F_SETFD, new_flags)
        except:
            # Never leak the descriptor if flag manipulation fails.
            os.close(fd)
            raise
        self._fd = fd
    def acquire_shared(self):
        """Take a shared (reader) lock; raise NotLocked if unavailable."""
        self._acquire(fcntl.LOCK_SH)
    def acquire_exclusive(self):
        """Take an exclusive (writer) lock; raise NotLocked if unavailable."""
        self._acquire(fcntl.LOCK_EX)
    def _acquire(self, operation):
        ASSERT.not_none(self._fd)
        # TODO: Should we add a retry here?
        try:
            fcntl.flock(self._fd, operation | fcntl.LOCK_NB)
        except BlockingIOError as exc:
            if exc.errno != errno.EWOULDBLOCK:
                raise
            # Another process holds a conflicting lock.
            raise NotLocked from None
    def release(self):
        """Release file lock.
        It is safe to call release even if lock has not been acquired.
        """
        ASSERT.not_none(self._fd)
        fcntl.flock(self._fd, fcntl.LOCK_UN)
    def close(self):
        # Idempotent: closing twice is a no-op.
        if self._fd is not None:
            os.close(self._fd)
            self._fd = None
@contextlib.contextmanager
def acquiring_shared(path):
    """Context manager holding a shared lock on ``path`` for the block."""
    with contextlib.closing(FileLock(path)) as lock:
        try:
            lock.acquire_shared()
            yield lock
        finally:
            lock.release()
@contextlib.contextmanager
def acquiring_exclusive(path):
    """Context manager holding an exclusive lock on ``path`` for the block."""
    with contextlib.closing(FileLock(path)) as lock:
        try:
            lock.acquire_exclusive()
            yield lock
        finally:
            lock.release()
def try_acquire_exclusive(path):
    """Try to take an exclusive lock; return it, or None if held elsewhere."""
    lock = FileLock(path)
    try:
        lock.acquire_exclusive()
        return lock
    except NotLocked:
        lock.close()
        return None
def acquire_exclusive(path):
    # Like try_acquire_exclusive, but failure to lock is a fatal assertion.
    return ASSERT.not_none(try_acquire_exclusive(path))
def is_locked_by_other(path):
    """Return True if another process currently holds a lock on ``path``."""
    lock = try_acquire_exclusive(path)
    if lock is None:
        return True
    # We got the lock, so nobody else held it; undo our probe.
    lock.release()
    lock.close()
    return False
| {
"repo_name": "clchiou/garage",
"path": "py/g1/files/g1/files/locks.py",
"copies": "1",
"size": "2592",
"license": "mit",
"hash": 3342851420796753400,
"line_mean": 21.5391304348,
"line_max": 70,
"alpha_frac": 0.5733024691,
"autogenerated": false,
"ratio": 3.79502196193265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.486832443103265,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'fill_micropolygon_mesh'
]
from pathlib import Path
from .util import memoize
import numpy as np
import pyopencl as cl
def fill_micropolygon_mesh(mesh, tile):
    """Rasterize ``mesh`` into ``tile.buffer`` with the OpenCL kernel.

    Uploads the mesh buffer/bounds and the tile's coordinate image to the
    device, runs ``fill_micropolygon_mesh`` over the tile, and copies the
    result back into ``tile.buffer``.
    """
    # BUG FIX: removed the leftover debug dump (generate_numpy_begin /
    # print_vertices) that printed every mesh vertex to stdout on each call.
    mf = cl.mem_flags
    context = cl_context()
    queue = cl_queue()
    mesh_buffer_g = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mesh.buffer)
    mesh_bounds_g = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mesh.bounds)
    coordinate_g = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=tile.coordinate_image)
    tile_g = cl.Buffer(context, mf.WRITE_ONLY, tile.buffer.nbytes)
    program = cl_program()
    mesh_rows, mesh_columns = mesh.buffer.shape
    kernel = program.fill_micropolygon_mesh
    # Scalar args must have explicit dtypes; buffer args stay None.
    kernel.set_scalar_arg_dtypes([None, None, np.int32, np.int32, None, None])
    kernel(
        queue, tile.buffer.shape, None,
        mesh_buffer_g, mesh_bounds_g,
        mesh_rows, mesh_columns,
        coordinate_g, tile_g,
    )
    cl.enqueue_copy(queue, tile.buffer, tile_g)
@memoize
def cl_context():
    """Create (once) and return the process-wide OpenCL context."""
    return cl.create_some_context()
@memoize
def cl_queue():
    """Create (once) and return the command queue on the shared context."""
    return cl.CommandQueue(cl_context())
@memoize
def cl_program():
    """Compile (once) the OpenCL program shipped next to this module."""
    # Idiom: pathlib's read_text replaces the manual open/read pair.
    source_path = Path(__file__).parent / 'opencl_src' / 'opencl_api.cl'
    return cl.Program(cl_context(), source_path.read_text()).build()
| {
"repo_name": "bracket/rasterizer",
"path": "handsome/opencl_api.py",
"copies": "2",
"size": "1605",
"license": "bsd-2-clause",
"hash": 5815364551026163000,
"line_mean": 22.2608695652,
"line_max": 102,
"alpha_frac": 0.6635514019,
"autogenerated": false,
"ratio": 3.2035928143712575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48671442162712575,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'Fixture',
]
import contextlib
import subprocess
class Fixture:
    """Test helpers that exercise file locks through the external ``flock``
    command-line tool, so locking is observed from a separate process.
    """
    @staticmethod
    def check_shared(path):
        # True if a shared lock on ``path`` can currently be taken.
        return _check_file_lock(path, '--shared')
    @staticmethod
    def check_exclusive(path):
        # True if an exclusive lock on ``path`` can currently be taken.
        return _check_file_lock(path, '--exclusive')
    @staticmethod
    def using_shared(path):
        # Context manager holding a shared lock from a child process.
        return _using_file_lock(path, '--shared')
    @staticmethod
    def using_exclusive(path):
        # Context manager holding an exclusive lock from a child process.
        return _using_file_lock(path, '--exclusive')
def _check_file_lock(path, mode):
    """Probe whether a flock in ``mode`` can be taken on ``path`` right now."""
    # ``flock --nonblock`` exits 0 when the lock was taken, 1 when it is held.
    cmd = ['flock', '--nonblock', mode, str(path), 'true']
    returncode = subprocess.run(cmd, check=False).returncode
    if returncode == 0:
        return True
    if returncode == 1:
        return False
    raise subprocess.CalledProcessError(returncode, cmd)
@contextlib.contextmanager
def _using_file_lock(path, mode):
    """Hold a flock on ``path`` from a child process for the ``with`` body.

    The child keeps the lock while blocked on ``read``; writing a newline to
    its stdin tells it to exit (releasing the lock).
    """
    cmd = ['flock', '--nonblock', mode, str(path), 'bash', '-c', 'read']
    with subprocess.Popen(cmd, stdin=subprocess.PIPE) as proc:
        try:
            proc.wait(0.01)  # Wait for ``flock`` to start up.
        except subprocess.TimeoutExpired:
            pass
        else:
            # The child exited immediately: it could not take the lock.
            raise subprocess.CalledProcessError(proc.poll(), proc.args)
        try:
            yield
        except:
            # Caller failed: just kill the child and re-raise.
            proc.kill()
            raise
        else:
            # Tell the child to finish, then verify it exited cleanly.
            proc.stdin.write(b'\n')
            proc.stdin.flush()
            proc.wait()
            returncode = proc.poll()
            if returncode:
                raise subprocess.CalledProcessError(returncode, proc.args)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/devtools/tests/g1/devtools/tests/filelocks.py",
"copies": "1",
"size": "1568",
"license": "mit",
"hash": 4484708845268428000,
"line_mean": 25.1333333333,
"line_max": 75,
"alpha_frac": 0.5841836735,
"autogenerated": false,
"ratio": 4.083333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5167517006833333,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'Future',
]
import logging
from g1.asyncs.kernels import errors
from g1.bases import classes
from g1.bases.assertions import ASSERT
from . import locks
LOG = logging.getLogger(__name__)
class Future:
    """Asynchronous future.
    Generally a task object is sufficient for most use cases, but in the
    rare cases that you do need a future object, here it is.
    Also, this future class is compatible with task; it can be used in
    CompletionQueue.
    """
    def __init__(self):
        self._completed = locks.Event()
        self._result = None
        self._exception = None
        self._callbacks = []    # pending completion callbacks; None once fired
        self._consumed = False  # True once the result/exception was read
        self._finalizer = None  # optional cleanup for unconsumed results
    def __del__(self):
        # Only act when a *successful* result was set but never consumed;
        # uncompleted, failed, or consumed futures need no special handling.
        if not (
            self._completed.is_set() and self._exception is None
            and not self._consumed
        ):
            return
        if self._finalizer is not None:
            try:
                self._finalizer(self._result)
            except BaseException:
                LOG.exception('finalizer error')
            return
        # Make a special case for None.
        if self._result is None:
            return
        LOG.warning(
            'future is garbage-collected but result is never consumed: %s',
            # Call repr to format self here to avoid resurrecting self.
            repr(self),
        )
    __repr__ = classes.make_repr(
        '{state} {self._result!r} {self._exception!r}',
        state=lambda self: 'completed'
        if self.is_completed() else 'uncompleted',
    )
    def is_completed(self):
        return self._completed.is_set()
    async def get_result(self):
        """Wait for completion, then return the result (or raise)."""
        await self.join()
        return self.get_result_nonblocking()
    async def get_exception(self):
        """Wait for completion, then return the exception (or None)."""
        await self.join()
        return self.get_exception_nonblocking()
    def get_result_nonblocking(self):
        # Caller must ensure completion first (see get_result).
        ASSERT.true(self.is_completed())
        self._consumed = True
        if self._exception:
            raise self._exception
        return self._result
    def get_exception_nonblocking(self):
        # Caller must ensure completion first (see get_exception).
        ASSERT.true(self.is_completed())
        self._consumed = True
        return self._exception
    def set_finalizer(self, finalizer):
        """Set finalizer.
        The finalizer is called when future's result is set but is never
        consumed. You may use finalizer to release the result object.
        """
        self._finalizer = finalizer
    def set_result(self, result):
        self._set_result_or_exception(result, None)
    def set_exception(self, exception):
        self._set_result_or_exception(None, exception)
    def _set_result_or_exception(self, result, exception):
        # First completion wins; later attempts are logged and dropped.
        if self.is_completed():
            if exception:
                LOG.error('ignore exception: %r', self, exc_info=exception)
            else:
                LOG.error('ignore result: %r, %r', self, result)
            return
        self._result = result
        self._exception = exception
        self._completed.set()
        # Fire callbacks exactly once; None marks "already fired".
        callbacks, self._callbacks = self._callbacks, None
        for callback in callbacks:
            self._call_callback(callback)
    #
    # Task-compatibility interface.
    #
    async def join(self):
        await self._completed.wait()
    def cancel(self):
        # Cancellation is modeled as completing with a Cancelled exception.
        self.set_exception(errors.Cancelled())
    def add_callback(self, callback):
        # Callbacks added after completion run immediately.
        if self.is_completed():
            self._call_callback(callback)
        else:
            self._callbacks.append(callback)
    def _call_callback(self, callback):
        # A misbehaving callback must not break completion; log and continue.
        try:
            callback(self)
        except Exception:
            LOG.exception('callback err: %r, %r', self, callback)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/bases/g1/asyncs/bases/futures.py",
"copies": "1",
"size": "3685",
"license": "mit",
"hash": 6637384362495461000,
"line_mean": 26.9166666667,
"line_max": 75,
"alpha_frac": 0.5869742198,
"autogenerated": false,
"ratio": 4.294871794871795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5381846014671794,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    "gatekeeper_check",
    "block_default_avatars",
    "block_bots",
    "minimum_creation_time",
    "block_all",
    "username_regex",
]
import datetime
import functools
import inspect
import re
from typing import Any, Dict, List
import discord
from .core import Bounce, Report
CheckOptions = Dict[str, Any]
def convert_options(check, parameters, options: CheckOptions) -> List[Any]:
    """Convert ``options`` values to the types annotated on ``check``'s
    parameters, skipping the first (member) parameter.

    Raises:
        Report: when a required option is missing from ``options``.
    """
    converted = {}
    for name, param in list(parameters.items())[1:]:
        if name not in options:
            if param.default is not inspect.Parameter.empty:
                continue  # optional parameter; nothing to convert
            raise Report(f"`{check.__name__}` is missing the `{name}` option.")
        value = options[name]
        annotation = param.annotation
        if annotation is inspect.Parameter.empty or isinstance(value, annotation):
            # No conversion needed: unannotated, or already the right type.
            converted[name] = value
        else:
            # Coerce by calling the annotation (e.g. int("5")).
            converted[name] = annotation(value)
    return converted
def gatekeeper_check(func):
    """Register a function as a Gatekeeper check."""

    @functools.wraps(func)
    async def wrapped(member: discord.Member, options: CheckOptions) -> None:
        parameters = inspect.signature(func).parameters
        if len(parameters) == 1:
            # The check only takes the member; don't pass options.
            await discord.utils.maybe_coroutine(func, member)
            return
        kwargs = convert_options(func, parameters, options)
        await discord.utils.maybe_coroutine(func, member, **kwargs)

    return wrapped
@gatekeeper_check
def block_default_avatars(member: discord.Member):
    """Bounce members that have not set a custom avatar."""
    # NOTE(review): assumes ``avatar`` is None for the default avatar --
    # confirm against the discord.py version in use.
    if member.avatar is None:
        raise Bounce("Has no avatar")
@gatekeeper_check
def block_bots(member: discord.Member):
    """Bounce bot accounts."""
    if member.bot:
        raise Bounce("Is a bot")
@gatekeeper_check
def minimum_creation_time(member: discord.Member, *, minimum_age: int):
    """Bounce accounts created less than ``minimum_age`` seconds ago."""
    account_age = (datetime.datetime.utcnow() - member.created_at).total_seconds()
    if account_age < minimum_age:
        raise Bounce(f"Account too young ({account_age} < {minimum_age})")
@gatekeeper_check
def block_all(_member: discord.Member):
    """Bounce every member unconditionally."""
    raise Bounce("Blocking all users")
@gatekeeper_check
def username_regex(member: discord.Member, *, regex: str, case_sensitive: bool = True):
    """Bounce members whose username matches ``regex``.

    Raises:
        Bounce: when the username matches the pattern.
        Report: when the pattern itself is invalid.
    """
    flags = 0 if case_sensitive else re.I
    try:
        if re.search(regex, member.name, flags):
            raise Bounce("Username matched regex")
    except re.error as err:
        # Idiom fix: chain the cause explicitly so the original regex error
        # is preserved as __cause__, not just implicit context.
        raise Report(f"Invalid regex. (`{err}`)") from err
| {
"repo_name": "sliceofcode/dogbot",
"path": "dog/ext/gatekeeper/checks.py",
"copies": "2",
"size": "2854",
"license": "mit",
"hash": -5400498281103370000,
"line_mean": 27.2574257426,
"line_max": 87,
"alpha_frac": 0.6450595655,
"autogenerated": false,
"ratio": 4.008426966292135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5653486531792136,
"avg_score": null,
"num_lines": null
} |
# Public names exported by ``from <module> import *``.
__all__ = [
    'gen_toy',
    'gen_toyn',
]
import numpy as np
from ._libstat import compute_cdf, invert_cdf, _vector_apply
from util import describe
import numpy.random as npr
from matplotlib import pyplot as plt
from probfit_warnings import SmallIntegralWarning
from warnings import warn
def gen_toyn(f, nsample, ntoy, bound, accuracy=10000, quiet=True, **kwd):
    """
    Generate ``ntoy`` toy datasets of ``nsample`` samples each.

    Alias of :func:`gen_toy` that draws ``nsample * ntoy`` samples and
    reshapes the result to ``(ntoy, nsample)``.

    :param f: pdf callable; the first argument is the observable
    :param nsample: number of samples per toy
    :param ntoy: number of toy datasets
    :param bound: (min, max) range used to tabulate the cdf
    :param accuracy: number of grid points for the cdf table
    :param quiet: if False, print diagnostics and show a comparison plot
    :param kwd: remaining keyword arguments are passed to ``f``
    :return: numpy.ndarray of shape (ntoy, nsample)
    """
    return gen_toy(f, nsample*ntoy, bound, accuracy, quiet, **kwd).reshape((ntoy, nsample))
def gen_toy(f, nsample, bound, accuracy=10000, quiet=True, **kwd):
    """
    Generate ``nsample`` toy samples distributed according to pdf ``f``.

    The pdf is tabulated on ``accuracy`` grid points over ``bound``, its cdf
    is built and then inverted at uniform random numbers (inverse-transform
    sampling).

    :param f: pdf callable; the first argument is the observable
    :param nsample: number of samples to generate
    :param bound: (min, max) range over which to tabulate the pdf
    :param accuracy: number of grid points for the cdf table
    :param quiet: if False, print the parameter names and draw a
        comparison plot of the generated samples against the pdf
    :param kwd: remaining keyword arguments are bound to the pdf's
        parameters by name
    :return: numpy.ndarray of generated samples
    """
    #based on inverting cdf this is fast but you will need to give it a reasonable range
    #unlike roofit which is based on accept reject
    vnames = describe(f)
    if not quiet:
        print vnames
    # Bind the pdf's non-observable parameters from the keyword arguments.
    my_arg = [kwd[v] for v in vnames[1:]]
    #random number
    #if accuracy is None: accuracy=10*numtoys
    r = npr.random_sample(nsample)
    x = np.linspace(bound[0], bound[1], accuracy)
    pdf = _vector_apply(f, x, tuple(my_arg))
    cdf = compute_cdf(pdf, x)
    if cdf[-1] < 0.01:
        warn(SmallIntegralWarning('Integral for given funcition is'
                                  ' really low. Did you give it a reasonable range?'))
    cdfnorm = cdf[-1]
    cdf /= cdfnorm
    #now convert that to toy: invert the normalized cdf at the uniforms
    ret = invert_cdf(r, cdf, x)
    if not quiet:
        #move this to plotting
        plt.figure()
        plt.title('comparison')
        numbin = 100
        h, e = np.histogram(ret, bins=numbin)
        mp = (e[1:]+e[:-1])/2.
        err = np.sqrt(h)
        plt.errorbar(mp, h, err, fmt='.b')
        bw = e[1] - e[0]
        # Scale the pdf to the histogram normalization (counts per bin).
        y = pdf * len(ret) / cdfnorm * bw
        ylow = y + np.sqrt(y)
        yhigh = y - np.sqrt(y)
        plt.plot(x, y, label='pdf', color='r')
        plt.fill_between(x, yhigh, ylow, color='g', alpha=0.2)
        plt.grid(True)
        plt.xlim(bound)
        plt.ylim(ymin=0)
    return ret
| {
"repo_name": "piti118/probfit",
"path": "probfit/toy.py",
"copies": "2",
"size": "2294",
"license": "mit",
"hash": -2412388403238895000,
"line_mean": 27.3209876543,
"line_max": 91,
"alpha_frac": 0.6006974717,
"autogenerated": false,
"ratio": 3.217391304347826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9805160043555814,
"avg_score": 0.002585746498402356,
"num_lines": 81
} |
__all__ = (
'get_aql_info',
'dump_aql_info',
)
# ==============================================================================
class AqlInfo (object):
    """Static metadata describing this Aqualid distribution."""

    __slots__ = (
        'name',
        'module',
        'description',
        'version',
        'date',
        'url',
        'license',
    )

    # -----------------------------------------------------------
    def __init__(self):
        # Fixed identity of this build; `date` may be filled in externally.
        self.name = "Aqualid"
        self.module = "aqualid"
        self.description = "General purpose build system."
        self.version = "0.7"
        self.date = None
        self.url = 'https://github.com/aqualid'
        self.license = "MIT License"

    # -----------------------------------------------------------
    def dump(self):
        """Render a short multi-line banner: name, version, optional date,
        description and site URL."""
        banner = "{name} {version}".format(name=self.name, version=self.version)
        if self.date:
            banner += ' ({date})'.format(date=self.date)
        return banner + "\n" + self.description + "\nSite: %s" % self.url
# -----------------------------------------------------------
_AQL_VERSION_INFO = AqlInfo()
# ==============================================================================
def get_aql_info():
    """Return the module-level singleton AqlInfo instance."""
    return _AQL_VERSION_INFO
# ==============================================================================
def dump_aql_info():
    """Return the singleton AqlInfo rendered as a human-readable banner."""
    return _AQL_VERSION_INFO.dump()
| {
"repo_name": "aqualid/aqualid",
"path": "aql/main/aql_info.py",
"copies": "1",
"size": "1418",
"license": "mit",
"hash": 1148452979871103000,
"line_mean": 23.0338983051,
"line_max": 80,
"alpha_frac": 0.3519040903,
"autogenerated": false,
"ratio": 4.574193548387097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5426097638687097,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'get_axes_dimension',
'assert_axes_dimension',
'width_to_dict',
'size_to_sizedict',
]
def get_axes_dimension(axes):
    """Returns the number of dimensions of a matplotlib axes object.

    Parameters
    ----------
    axes : object
        The matplotlib axes object.

    Returns
    -------
    int
        The number of dimensions of a matplotlib axes object.
    """
    # Only 3D axes expose get_zlim.
    return 3 if hasattr(axes, 'get_zlim') else 2
def assert_axes_dimension(axes, dim):
    """Asserts if the number of dimensions of a matplotlib axes equals a given dim.

    Parameters
    ----------
    axes : object
        The matplotlib axes object to assert.
    dim : int
        The numbers of dimensions to assert with.

    Returns
    -------
    bool
        True if the axes object has dim dimensions.
    """
    actual = get_axes_dimension(axes)
    assert actual == dim, 'The provided axes are not {0}D.'.format(dim)
def width_to_dict(width, dictkeys, defval=None):
    """Normalize a width spec to a dict over *dictkeys*.

    A falsy *width* falls back to *defval*. A numeric width applies to every
    key; a dict width is looked up per key with *defval* as fallback.

    Raises:
        Exception: if *width* is neither numeric nor a dict (after fallback).
    """
    width = width or defval
    if isinstance(width, (int, float)):
        return dict((key, width) for key in dictkeys)
    if isinstance(width, dict):
        # The original re-assigned numeric values onto themselves here -- a
        # no-op mutation loop, removed; behavior is identical.
        return dict((key, width.get(key, defval)) for key in dictkeys)
    raise Exception('This is not a valid width format: {0}'.format(type(width)))
def size_to_sizedict(size, dictkeys, defval=None):
    """Normalize a size spec to a dict over *dictkeys*.

    A falsy *size* falls back to *defval*. A numeric size applies to every
    key; a dict size is looked up per key with *defval* as fallback.

    Raises:
        Exception: if *size* is neither numeric nor a dict (after fallback).
    """
    size = size or defval
    if isinstance(size, (int, float)):
        return dict((key, size) for key in dictkeys)
    if isinstance(size, dict):
        # The original re-assigned numeric values onto themselves here -- a
        # no-op mutation loop, removed; behavior is identical.
        return dict((key, size.get(key, defval)) for key in dictkeys)
    raise Exception('This is not a valid size format: {0}'.format(type(size)))
def synchronize_scale_axes(axes):
    # Placeholder: intended to synchronize scales across axes; not implemented.
    pass
| {
"repo_name": "compas-dev/compas",
"path": "src/compas_plotters/core/utilities.py",
"copies": "1",
"size": "1996",
"license": "mit",
"hash": -7101602305227351000,
"line_mean": 27.1126760563,
"line_max": 89,
"alpha_frac": 0.5751503006,
"autogenerated": false,
"ratio": 4.008032128514056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004980500285664605,
"num_lines": 71
} |
__all__ = [
'get_builder_name',
'get_builder_image_path',
'get_image_path',
'parse_images_parameter',
# Helper commands.
'chown',
'rsync',
]
import getpass
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from g1.containers import models
import shipyard2
def get_builder_name(name):
    """Return the builder variant of an image name (``<name>-builder``)."""
    return '{}-builder'.format(name)
def get_builder_image_path(parameters, name):
    """Return the builder-image path, sitting next to the image of *name*."""
    image_path = get_image_path(parameters, name)
    return image_path.with_name(shipyard2.IMAGE_DIR_BUILDER_IMAGE_FILENAME)
def get_image_path(parameters, name):
    """Return ``<releases-root>/<relpath>/<name>/<version>/<image-file>``."""
    base = parameters['//releases:root'] / foreman.get_relpath() / name
    # Version must be configured for this image; fail loudly otherwise.
    version = ASSERT.not_none(parameters['%s/version' % name])
    return base / version / shipyard2.IMAGE_DIR_IMAGE_FILENAME
def parse_images_parameter(value):
    """Parse a comma-separated list of image specs into PodConfig.Image objects.

    Each spec is one of ``id:<image-id>``, ``nv:<name>:<version>``, or
    ``tag:<tag>``; anything else is an assertion failure.
    """
    images = []
    for spec in value.split(','):
        if spec.startswith('id:'):
            images.append(models.PodConfig.Image(id=spec[len('id:'):]))
        elif spec.startswith('nv:'):
            # maxsplit=2 keeps any further colons inside the version string;
            # the original maxsplit=3 could yield four parts and break this
            # three-way unpacking with a ValueError.
            _, name, version = spec.split(':', maxsplit=2)
            images.append(models.PodConfig.Image(name=name, version=version))
        elif spec.startswith('tag:'):
            images.append(models.PodConfig.Image(tag=spec[len('tag:'):]))
        else:
            ASSERT.unreachable('unknown image parameter: {}', spec)
    return images
def chown(path):
    """Make the invoking user the owner (user and group) of *path*."""
    owner = getpass.getuser()
    with scripts.using_sudo():
        scripts.chown(owner, owner, path)
def rsync(src_path, dst_path, rsync_args=()):
    """Archive-copy the *contents* of src_path into dst_path via rsync."""
    argv = ['rsync', '--archive']
    argv.extend(rsync_args)
    # Trailing slash trick: copy the directory's contents, not the directory.
    argv.append('%s/' % src_path)
    argv.append(dst_path)
    scripts.run(argv)
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/rules/images/utils.py",
"copies": "1",
"size": "1675",
"license": "mit",
"hash": 8814770200839174000,
"line_mean": 22.5915492958,
"line_max": 77,
"alpha_frac": 0.5964179104,
"autogenerated": false,
"ratio": 3.4968684759916493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.959160965936281,
"avg_score": 0.00033534540576794097,
"num_lines": 71
} |
__all__ = (
'get_client',
)
import pytz
from zeep import Client
from zeep.cache import SqliteCache
from zeep.transports import Transport
# Argentina has a single timezone; take it from pytz's country table.
TZ_AR = pytz.timezone(pytz.country_timezones['ar'][0])
# Shared SOAP transport with a 24-hour on-disk WSDL cache.
transport = Transport(cache=SqliteCache(timeout=86400))
# WSDL URLs keyed by (service name, sandbox?); True entries are AFIP's
# homologation (testing) environment.
wsdls = {
    ('wsaa', False): 'https://wsaa.afip.gov.ar/ws/services/LoginCms?wsdl',
    ('wsfe', False): 'https://servicios1.afip.gov.ar/wsfev1/service.asmx?WSDL',
    ('wsaa', True): 'https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl',
    ('wsfe', True): 'https://wswhomo.afip.gov.ar/wsfev1/service.asmx?WSDL',
}
# Memoized zeep clients, keyed like `wsdls`.
cached_clients = {}
def get_client(service_name, sandbox=False):
    """
    Returns a client for a given service.

    The `sandbox` argument should only be necessary if a the client will be
    used to make a request. If it will only be used to serialize objects, it is
    irrelevant. Avoid the overhead of determining the sandbox mode in the
    calling context if only serialization operations will take place.

    :param string service_name: The name of the web services.
    :param bool sandbox: Whether the sandbox (or production) environment should
        be used by the returned client.
    :returns: A zeep client to communicate with an AFIP webservice.
    :rtype: zeep.Client
    """
    key = (service_name.lower(), sandbox,)
    # Keep the try narrow: the original wrapped Client() construction too, so
    # any KeyError raised *inside* zeep would be misreported as an unknown
    # service name.
    try:
        wsdl = wsdls[key]
    except KeyError:
        raise ValueError('Unknown service name, {}'.format(service_name))
    if key not in cached_clients:
        cached_clients[key] = Client(wsdl, transport=transport)
    return cached_clients[key]
| {
"repo_name": "hobarrera/django-afip",
"path": "django_afip/clients.py",
"copies": "1",
"size": "1576",
"license": "isc",
"hash": -8560964083990989000,
"line_mean": 32.5319148936,
"line_max": 79,
"alpha_frac": 0.6922588832,
"autogenerated": false,
"ratio": 3.502222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4694481105422222,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'get_declared_error_types',
'select',
]
import dataclasses
from g1.bases import typings
from g1.bases.assertions import ASSERT
NoneType = type(None)
def get_declared_error_types(response_type):
    """Map the exception types declared on ``response_type.Error`` to the
    field names that carry them, returned as ``{exception_class: field_name}``.
    """
    # When there is only one error type, reqrep.make_annotations
    # would not generate Optional[T].
    fields = dataclasses.fields(response_type.Error)
    if len(fields) == 1:
        # Single field: its annotation is the exception class itself.
        return {ASSERT.issubclass(fields[0].type, Exception): fields[0].name}
    else:
        return {
            # Each field must be typing.Optional[T]; ASSERT presumably
            # evaluates to match_optional_type's result (the inner type T)
            # when the check passes -- see g1.bases.assertions.
            ASSERT(
                typings.is_recursive_type(field.type)
                and typings.is_union_type(field.type)
                and typings.match_optional_type(field.type),
                'expect typing.Optional[T]: {!r}',
                field,
            ): field.name
            for field in fields
        }
def select(obj):
    """Return ``(field_name, value)`` for the first non-None field of *obj*;
    fall back to the last NoneType-annotated field with value None."""
    fallback = None
    for field in dataclasses.fields(obj):
        value = getattr(obj, field.name)
        if value is not None:
            return field.name, value
        if typings.type_is_subclass(field.type, NoneType):
            # Remember a field whose declared type is NoneType itself.
            fallback = field.name
    if fallback:
        return fallback, None
    return ASSERT.unreachable('expect one non-None field: {!r}', obj)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/messaging/g1/messaging/reqrep/utils.py",
"copies": "1",
"size": "1245",
"license": "mit",
"hash": 4343416570348286000,
"line_mean": 27.9534883721,
"line_max": 77,
"alpha_frac": 0.6040160643,
"autogenerated": false,
"ratio": 3.7841945288753798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9888210593175379,
"avg_score": 0,
"num_lines": 43
} |
__all__ = [
"get_login",
"do_login",
"do_logout",
"get_apps",
"do_deploy",
"wb_error_handler",
"generic_error_handler",
]
from flask import render_template, request, redirect, url_for
from flask import session, g, current_app
from flask import Response
from wb_api import WBAPIUnauthorizedError
import json
def get_login():
    """Render the login page, or go straight to the apps list when the
    session already holds an API token."""
    if "api_token" in session:
        return redirect(url_for('get_apps'))
    return render_template(
        'login.html',
        error=request.args.get('error', None),
        error_message=request.args.get('error_msg', None),
        password_reset_message=current_app.config["PASSWORD_RESET_MESSAGE"])
def do_login():
    """Authenticate against the WB API and stash the token in the session."""
    session.clear()
    try:
        g.wb_api.login(**request.form)
    except WBAPIUnauthorizedError as e:
        details = e.response.json()["details"]
        return redirect(url_for('get_login', error="bad_login",
                                error_msg=details))
    session["username"] = request.form["username"]
    session["api_token"] = g.wb_api.get_token()
    return redirect(url_for('get_apps'))
def do_logout():
    """Drop all session state and send the user back to the login page."""
    session.clear()
    return redirect(url_for('get_login'))
def get_apps():
    """Render the apps list with an ETag so unchanged listings return 304."""
    try:
        apps = g.wb_api.get_apps()
        stages = g.wb_api.get_stages()
    except WBAPIUnauthorizedError:
        return redirect(url_for('do_logout'))
    import hashlib
    payload = json.dumps(sorted(apps, key=lambda a: a["name"]), sort_keys=True)
    # The original used hash(), which is process-dependent (and salted
    # per-process on Python 3), so ETags differed across workers/restarts and
    # defeated conditional requests.  Use a stable digest instead.
    etag = '"{}"'.format(hashlib.md5(payload.encode('utf-8')).hexdigest())
    headers = {'ETag': etag}
    if request.headers.get('If-None-Match', None) == etag:
        return Response(None, 304, headers=headers)
    return Response(render_template(
        'index.html', apps=apps,
        stages=stages,
        username=session.get("username", "anonymous")), headers=headers)
def do_deploy():
    """Trigger a deployment from the submitted form and return to the list."""
    app = request.form["app"]
    stage = request.form["stage"]
    version = request.form["version"]
    try:
        g.wb_api.deploy_app(app, stage, version)
    except WBAPIUnauthorizedError:
        # Token expired or revoked: force a fresh login.
        return redirect(url_for('do_logout'))
    return redirect(url_for('get_apps'))
def generic_error_handler(e):
    """Render the generic error page for unexpected (non-WB-API) failures."""
    error = {
        # NOTE(review): the displayed status (502) disagrees with the actual
        # HTTP response code (500) returned below -- confirm which is intended.
        "status": 502,
        "msg": e.message  # NOTE(review): `.message` is Python-2-only; verify runtime.
    }
    return render_template('error.html', error=error), 500
def wb_error_handler(e):
    """Render the error page for upstream WB API errors, mirroring the
    upstream HTTP status code."""
    status_code = e.response.status_code
    error = {"status": status_code}
    try:
        error["msg"] = e.response.json()["details"]
    except KeyError:
        # Valid JSON but no "details" key: show the raw body instead.
        error["msg"] = e.response.text
    except ValueError:
        # Body was not JSON at all.
        error["msg"] = e.message  # NOTE(review): `.message` is Python-2-only; verify runtime.
    return render_template('error.html', error=error), status_code
def healthcheck():
    """Liveness probe endpoint: always responds 204 No Content."""
    return Response(None, 204)
| {
"repo_name": "hmrc/wristband-frontend",
"path": "fe/views.py",
"copies": "1",
"size": "2682",
"license": "apache-2.0",
"hash": 8010592971688773000,
"line_mean": 26.0909090909,
"line_max": 151,
"alpha_frac": 0.6159582401,
"autogenerated": false,
"ratio": 3.585561497326203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4701519737426203,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'get_many', 'pop_many',
]
def get_many(d, required=(), optional=(), one_of=()):
    """
    Returns a predictable number of elements out of ``d`` in a list for auto-expanding.

    Keys in ``required`` will raise KeyError if not found in ``d``.
    Keys in ``optional`` will return None if not found in ``d``.
    Keys in ``one_of`` will raise KeyError if none exist, otherwise return the first in ``d``.

    Example::

        uid, action, limit, offset = get_many(request.params, required=['uid', 'action'], optional=['limit', 'offset'])

    Note: This function has been added to the webhelpers package.
    """
    # Defaults are tuples, not lists: mutable default arguments are shared
    # across calls and a classic bug source, even when only read.
    d = d or {}
    r = [d[k] for k in required]
    r += [d.get(k) for k in optional]
    if one_of:
        for k in (k for k in one_of if k in d):
            return r + [d[k]]
        raise KeyError("Missing a one_of value.")
    return r
def pop_many(d, keys, default=None):
    """Pop each key in *keys* out of *d*, returning the removed values
    (or *default* for missing keys) in order."""
    values = []
    for key in keys:
        values.append(d.pop(key, default))
    return values
if __name__ == "__main__":
    # Running this module directly executes its doctests (ELLIPSIS enabled
    # so examples may elide output with `...`).
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
| {
"repo_name": "shazow/unstdlib.py",
"path": "unstdlib/standard/dict_.py",
"copies": "1",
"size": "1061",
"license": "mit",
"hash": -6806512681523439000,
"line_mean": 26.2051282051,
"line_max": 119,
"alpha_frac": 0.5956644675,
"autogenerated": false,
"ratio": 3.5366666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4632331134166667,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'get_outputs',
'get_rule',
]
import itertools
import logging
import iga.context
import iga.precond
from iga.build_rules import build_rules
from iga.core import WriteOnceDict
from iga.error import IgaError
from iga.path import Glob
from iga.rule import Rule
from iga.rule import RuleFunc
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
# Packages that have been loaded (no BUILD file should be executed twice).
_LOADED_PACKAGES = set()
# Map a rule's outputs to that rule.
_OUTPUT_TO_RULE = WriteOnceDict()
def get_outputs():
    """Return a frozenset of all output labels registered so far."""
    return frozenset(_OUTPUT_TO_RULE)
def get_rule(label, *, raises=False):
    """Return Rule object or raise IgaError (if required, else return
    None) if label does not refer to a rule or an output file.
    """
    # Lazily execute the package's BUILD file on first reference.
    if label.package not in _LOADED_PACKAGES:
        _load_rules(label.package)
        _LOADED_PACKAGES.add(label.package)
    # If `label` names an output file, resolve it to the owning rule's label.
    rule_label = _OUTPUT_TO_RULE.get(label, label)
    try:
        return Rule.get_object(rule_label)
    except KeyError:
        if not raises:
            return None
        raise IgaError('%s does not refer to a rule or an output file' %
                       (label,))
def _load_rules(package):
    """Load rules from a BUILD file.

    Executes the package's BUILD file inside a fresh context; the rule
    functions invoked by the BUILD file append entries to ``rule_data``,
    which are then built into Rule objects, registered, and indexed by
    their outputs.
    """
    buildfile_path = iga.context.current()['source'] / package / 'BUILD'
    LOG.info('load %s', buildfile_path)
    with buildfile_path.open() as buildfile:
        code = buildfile.read()
    code = compile(code, str(buildfile_path), 'exec')
    rule_data = []
    with iga.context.create() as cxt:
        cxt['package'] = package
        cxt['rule_data'] = rule_data
        # BUILD files see only a restricted namespace (rule funcs, glob, ...).
        exec(code, _make_buildfile_globals())
    for rule in build_rules(package, rule_data):
        Rule.register(rule)
        # Map every output file back to the rule that produces it.
        for output in itertools.chain.from_iterable(rule.outputs.values()):
            _OUTPUT_TO_RULE[output] = rule.name
def _make_buildfile_globals():
    """Build the global namespace used to exec() a BUILD file: glob(), a
    no-op package(), plus every registered rule function."""
    varz = WriteOnceDict()
    varz.update(
        glob=glob,
        package=_do_nothing('package'),
    )
    varz.update(RuleFunc.get_all_objects())
    # WriteOnceDict guards against duplicate names; exec needs a plain dict.
    return dict(varz)
def glob(string):
    """Validate *string* is a str and wrap it as a Glob pattern (BUILD-file API)."""
    iga.precond.check_type(string, str)
    return Glob(string)
def _do_nothing(func_name):
def func(**kwargs):
if kwargs:
LOG.debug('%s() ignores %r', func_name, sorted(kwargs))
return func
| {
"repo_name": "clchiou/iga",
"path": "iga/package.py",
"copies": "1",
"size": "2334",
"license": "mit",
"hash": 1617790225011279400,
"line_mean": 25.2247191011,
"line_max": 76,
"alpha_frac": 0.6452442159,
"autogenerated": false,
"ratio": 3.447562776957164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45928069928571635,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'get_selected_array_name',
'get_selected_array_field',
'copy_arrays_to_point_data',
'get_numpy_array',
'get_vtk_array',
'add_array',
'get_selected_array',
'search_for_array',
'get_all_array_names',
]
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from . import errors as _helpers
def get_selected_array_name(algorithm, idx):
    """Gets the name of the input array for a given index on a VTK algorithm

    Args:
        algorithm (vtkAlgorithm): A vtkAlgorithm class instantiation
        idx (int): the input array index

    Return:
        str : the name of the input array for the given index
    """
    array_info = algorithm.GetInputArrayInformation(idx)
    return array_info.Get(vtk.vtkDataObject.FIELD_NAME())
def get_selected_array_field(algorithm, idx):
    """Gets the field of the input array for a given index on a VTK algorithm

    Args:
        algorithm (vtkAlgorithm) : A vtkAlgorithm class instantiation
        idx (int) : the input array index

    Return:
        int : the field type of the input array for the given index
    """
    array_info = algorithm.GetInputArrayInformation(idx)
    return array_info.Get(vtk.vtkDataObject.FIELD_ASSOCIATION())
def get_field_id_by_name(field):
    """Translate a field-association name (case-insensitive, with short
    aliases) into its VTK field id: point=0, cell=1, field=2, row=6."""
    field_ids = {
        'point': 0, 'pt': 0, 'p': 0,
        'cell': 1, 'c': 1,
        'field': 2, 'f': 2,
        'row': 6, 'r': 6,
    }
    try:
        return field_ids[field.lower()]
    except KeyError:
        raise _helpers.PVGeoError(
            'Field association not defined. Try inputing `point`, `cell`, `field`, or `row`.'
        )
def copy_arrays_to_point_data(pdi, pdo, field):
    """Copys arrays from an input to an ouput's point data.

    Args:
        pdi (vtkDataObject) : The input data object to copy from
        pdo (vtkDataObject) : The output data object to copy over to
        field (int or str) : the field type id or name

    Return:
        vtkDataObject : returns the output data object parameter
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    getters = {
        0: 'GetPointData',
        1: 'GetCellData',  # DO NOT USE
        2: 'GetFieldData',
        6: 'GetRowData',
    }
    if field not in getters:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    source = getattr(pdi, getters[field])()
    for i in range(source.GetNumberOfArrays()):
        pdo.GetPointData().AddArray(source.GetArray(i))
    return pdo
def get_numpy_array(wpdi, field, name):
    """Grabs an array from vtkDataObject given its name and field association.

    Args:
        wpdi (wrapped vtkDataObject) : the input data object wrapped using
            vtk dataset adapter
        field (int or str) : the field type id or name
        name (str) : the name of the input array for the given index

    Return:
        numpy.array : a wrapped ``vtkDataArray`` for NumPy
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    # Wrap on the caller's behalf if a raw VTK object was passed.
    if not isinstance(wpdi, vtk.numpy_interface.dataset_adapter.DataObject):
        wpdi = dsa.WrapDataObject(wpdi)
    containers = {0: 'PointData', 1: 'CellData', 2: 'FieldData', 6: 'RowData'}
    if field not in containers:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    return getattr(wpdi, containers[field])[name]
def get_vtk_array(pdi, field, name):
    """Grabs an array from vtkDataObject given its name and field association.

    Args:
        pdi (vtkDataObject) : the input data object
        field (int or str) : the field type id or name
        name (str) : the name of the input array for the given index

    Return:
        vtkDataArray : the array from input field and name
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    getters = {
        0: 'GetPointData',
        1: 'GetCellData',
        2: 'GetFieldData',
        6: 'GetRowData',
    }
    if field not in getters:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    return getattr(pdi, getters[field])().GetArray(name)
def get_selected_array(algorithm, wpdi, idx):
    """Gets selectected array at index idx wrapped for NumPy

    Args:
        algorithm (vtkAlgorithm) : A vtkAlgorithm class instantiation
        wpdi (wrapped vtkDataObject) : the input data object wrapped using vtk
            dataset adapter
        idx (int) : the input array index

    Return:
        numpy.array : a wrapped ``vtkDataArray`` for NumPy
    """
    array_name = get_selected_array_name(algorithm, idx)
    array_field = get_selected_array_field(algorithm, idx)
    return get_array(wpdi, array_field, array_name)
def add_array(pdo, field, vtkArray):
    """Adds an array to a vtkDataObject given its field association.

    Args:
        pdo (vtkDataObject) : the output data object
        field (int or str) : the field type id or name
        vtkArray (vtkDataArray) : the data array to add to the output

    Return:
        vtkDataObject : the output data object with the data array added
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    getters = {
        0: 'GetPointData',
        1: 'GetCellData',
        2: 'GetFieldData',
        6: 'GetRowData',
    }
    if field not in getters:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    getattr(pdo, getters[field])().AddArray(vtkArray)
    return pdo
def _get_data(pdi, field):
"""Gets data field from input vtkDataObject"""
data = None
if isinstance(field, str):
field = get_field_id_by_name(field)
try:
# Point Data
if field == 0:
data = pdi.GetPointData()
# Cell Data:
elif field == 1:
data = pdi.GetCellData()
# Field Data:
elif field == 2:
data = pdi.GetFieldData()
# Row Data:
elif field == 6:
data = pdi.GetRowData()
else:
raise _helpers.PVGeoError(
'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
field
)
)
except AttributeError:
raise _helpers.PVGeoError(
'Input data does not have field type `{}`.'.format(field)
)
return data
def get_array(pdi, field, name):
    """Gets an array from a vtkDataObject given its field association and name.

    Notes:
        - Point Data: 0
        - Cell Data: 1
        - Field Data: 2
        - Row Data: 6

    Args:
        pdi (vtkDataObject) : the input data object
        field (int or str) : the field type id or name
        name (str) : the data array name

    Return:
        vtkDataObject: the output data object
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    return _get_data(pdi, field).GetArray(name)
def search_for_array(pdi, name):
    """Search point, cell, field, then row data of *pdi* for an array
    named *name*.

    Args:
        pdi (vtkDataObject) : the input data object
        name (str) : the array name to look for

    Return:
        tuple : ``(vtkDataArray, field_id)`` for the first match

    Raises:
        PVGeoError: if no field association contains an array of that name.
    """
    def _search_field(field):
        # Scan one field association for a matching array name.
        data = _get_data(pdi, field)
        for i in range(data.GetNumberOfArrays()):
            if data.GetArrayName(i) == name:
                return data.GetArray(i)
        return None

    for field in (0, 1, 2, 6):
        try:
            arr = _search_field(field)
        except _helpers.PVGeoError:
            # This data object lacks the field association entirely.
            continue
        if arr is not None:
            # We found it!
            return arr, field
    # The original had an unreachable `return None` after this raise; removed.
    raise _helpers.PVGeoError('Array `{}` not found in input data.'.format(name))
def get_all_array_names(dataset, field):
    """Return the array names for one field association of *dataset*.

    Args:
        dataset (vtkDataObject or wrapped data object) : the input data
        field (int or str) : the field type id or name

    Return:
        list : the names of arrays on that field association
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    # Wrap with the numpy dataset adapter unless the caller already did.
    if not isinstance(dataset, vtk.numpy_interface.dataset_adapter.DataObject):
        wpdi = dsa.WrapDataObject(dataset)
    else:
        wpdi = dataset
    if field == 0:
        return wpdi.PointData.keys()
    elif field == 1:
        return wpdi.CellData.keys()
    elif field == 2:
        return wpdi.FieldData.keys()
    elif field == 6:
        return wpdi.RowData.keys()
    # The original had an unreachable `return None` after this raise; removed.
    raise _helpers.PVGeoError(
        'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
            field
        )
    )
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/_helpers/arrays.py",
"copies": "1",
"size": "9733",
"license": "bsd-3-clause",
"hash": -560654485157803600,
"line_mean": 27.9672619048,
"line_max": 107,
"alpha_frac": 0.5873831296,
"autogenerated": false,
"ratio": 3.8153665229321834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49027496525321834,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'GSLibReader',
'GSLibPointSetReader',
'WriteTableToGSLib',
]
__displayname__ = 'GSLib/GeoEAS File I/O'
import numpy as np
from .. import _helpers, interface
from ..base import WriterBase
from ..readers import DelimitedPointsReaderBase, DelimitedTextReader
class _GSLibReaderMethods(object):
"""A helper class to handle overriding of delimited text reading methods
for all GSLib readers."""
# NOTE: order of inherritance matters ALOT!
_header = None
extensions = 'sgems dat geoeas gslib GSLIB txt SGEMS SGeMS'
def _extract_header(self, content):
self._header = content[0]
try:
num = int(content[1]) # number of data columns
except ValueError:
raise _helpers.PVGeoError('This file is not in proper GSLIB format.')
titles = [ln.rstrip('\r\n') for ln in content[2 : 2 + num]]
return titles, content[2 + num : :]
#### Seters and Geters ####
def get_file_header(self):
"""Returns the file header. If file hasn't been read, returns ``None``"""
return self._header
class GSLibReader(_GSLibReaderMethods, DelimitedTextReader):
    """Reads a GSLIB file format to a ``vtkTable``. The GSLIB file format has
    headers lines followed by the data as a space delimited ASCI file (this
    filter is set up to allow you to choose any single character delimiter).
    The first header line is the title and will be printed to the console.
    This line may have the dimensions for a grid to be made of the data.
    The second line is the number (n) of columns of data. The next n lines are
    the variable names for the data in each column. You are allowed up to ten
    characters for the variable name. The data follow with a space between each
    field (column).
    """

    __displayname__ = 'GSLib Table Reader'
    __category__ = 'reader'
    description = 'PVGeo: GSLib Table'

    def __init__(self, outputType='vtkTable', **kwargs):
        # GSLib data columns are whitespace-separated by default.
        DelimitedTextReader.__init__(self, outputType=outputType, **kwargs)
        self.set_split_on_white_space(True)
class GSLibPointSetReader(_GSLibReaderMethods, DelimitedPointsReaderBase):
    """Reads a GSLib point set file where the first three columns are the XYZ
    coordinates and the remainder of the data is consistent with the
    :class:`GSLibReader` specifications."""

    __displayname__ = 'GSLib Point Set Reader'
    __category__ = 'reader'
    description = 'PVGeo: GSLib Point Set'
    # Accept point-set-specific extensions on top of the generic GSLib ones.
    extensions = _GSLibReaderMethods.extensions + 'gslibpts ptset gpts'

    def __init__(self, **kwargs):
        # GSLib data columns are whitespace-separated by default.
        DelimitedPointsReaderBase.__init__(self, **kwargs)
        self.set_split_on_white_space(True)
class WriteTableToGSLib(WriterBase):
    """Write the row data in a ``vtkTable`` to the GSLib Format"""

    __displayname__ = 'Write ``vtkTable`` To GSLib Format'
    __category__ = 'writer'

    def __init__(self, inputType='vtkTable'):
        WriterBase.__init__(self, inputType=inputType, ext='gslib')
        # First (title) line of the GSLib header; user-settable via set_header().
        self._header = 'Data saved by PVGeo'

    def perform_write_out(self, input_data_object, filename, object_name):
        """Write out the input data object to the GSLib file format"""
        # Get the input data object
        table = input_data_object
        numArrs = table.GetRowData().GetNumberOfArrays()
        arrs = []
        titles = []
        # Get data arrays
        for i in range(numArrs):
            vtkarr = table.GetRowData().GetArray(i)
            arrs.append(interface.convert_array(vtkarr))
            titles.append(vtkarr.GetName())
        # GSLib header: title line, column count, then one column name per line.
        header = '%s\n' % self._header
        header += '%d\n' % len(titles)
        datanames = '\n'.join(titles)
        header += datanames
        # Arrays were gathered row-wise; transpose so each array is a column.
        arrs = np.array(arrs).T
        np.savetxt(filename, arrs, comments='', header=header, fmt=self.get_format())
        return 1

    def set_header(self, header):
        """Set the file header string"""
        if self._header != header:
            self._header = header
            self.Modified()
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/gslib/gslib.py",
"copies": "1",
"size": "4031",
"license": "bsd-3-clause",
"hash": 5172806691811774000,
"line_mean": 34.052173913,
"line_max": 85,
"alpha_frac": 0.647233937,
"autogenerated": false,
"ratio": 3.82085308056872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49680870175687203,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Headers',
]
from kitsu.http.errors import *
_canonicalHeaderParts = { 'www' : 'WWW' }
def _canonicalHeaderName(name):
def canonical(part):
return _canonicalHeaderParts.get(part) or part.capitalize()
return '-'.join(canonical(part.lower()) for part in name.split('-'))
nil = object()
class Header(object):
    """A header entry that is a node in two doubly-linked chains: overall
    insertion order (prev/next) and same-name order (prev_value/next_value)."""

    __slots__ = ('name', 'value', 'prev', 'next', 'prev_value', 'next_value')

    def __init__(self, name, value, prev=None, next=None, prev_value=None, next_value=None):
        self.name = name
        self.value = value
        self.prev = prev
        self.next = next
        self.prev_value = prev_value
        self.next_value = next_value

    def unlink(self):
        """Detach this node from both chains, patching neighbors around it."""
        prev_node, next_node = self.prev, self.next
        if prev_node is not None:
            prev_node.next = next_node
        if next_node is not None:
            next_node.prev = prev_node
        prev_same, next_same = self.prev_value, self.next_value
        if prev_same is not None:
            prev_same.next_value = next_same
        if next_same is not None:
            next_same.prev_value = prev_same
        self.prev = None
        self.next = None
        self.prev_value = None
        self.next_value = None

    def __repr__(self):
        def objref(obj):
            # Show linked Header neighbors by id only, to avoid recursion.
            if isinstance(obj, Header):
                return "0x%x" % id(obj)
            return repr(obj)
        return "<Header(name=%r, value=%r, prev=%s, next=%s, prev_value=%s, next_value=%s) at 0x%x>" % (self.name, self.value, objref(self.prev), objref(self.next), objref(self.prev_value), objref(self.next_value), id(self))
class Headers(object):
    """Ordered, case-insensitive multi-mapping of HTTP headers.

    Headers are stored as ``Header`` nodes threaded through two intrusive
    doubly-linked lists: a global list (``prev``/``next``) preserving overall
    insertion order, and a per-name list (``prev_value``/``next_value``)
    chaining all values that share the same case-folded name.  ``__values``
    maps the normalized key to a two-item list ``[first_header, last_header]``.

    NOTE(review): relies on module-level names defined elsewhere in this file
    (``Header``, the ``nil`` sentinel, ``_canonicalHeaderName``,
    ``HTTPDataError``) and on Python 2 string types (``basestring``,
    ``unicode``).
    """
    __slots__ = ('__head', '__tail', '__values', 'encoding', '__partialHeader', '__weakref__')

    def __init__(self, data=(), encoding='utf-8'):
        """Create an empty header set, optionally seeded from *data*."""
        self.__head = None
        self.__tail = None
        self.__values = {}
        self.encoding = encoding
        self.__partialHeader = None
        if data:
            self.update(data)

    def __del__(self):
        # Break the node link cycles so the Header objects can be collected.
        self.__remove()

    def __make_key(self, name):
        """Normalize *name* into the lowercase byte-string lookup key."""
        if not isinstance(name, basestring):
            raise KeyError(name)
        name = name.lower()
        if isinstance(name, unicode):
            name = name.encode(self.encoding)
        return name

    def __make_text(self, value, canonical=False):
        """Encode *value* for wire output; canonicalize names on request."""
        assert isinstance(value, basestring)
        if canonical:
            value = _canonicalHeaderName(value)
        if isinstance(value, unicode):
            value = value.encode(self.encoding)
        return value

    def __iter(self, *args):
        """Yield Header nodes: all of them, or only those named ``args[0]``.

        The successor pointer is captured before yielding, so iteration
        survives the current node being unlinked by the caller.
        """
        if not args:
            header = self.__head
            while header is not None:
                next = header.next
                yield header
                header = next
        else:
            item = self.__values.get(self.__make_key(args[0]))
            if item is not None:
                header = item[0]
            else:
                header = None
            while header is not None:
                next = header.next_value
                yield header
                header = next

    def __remove(self, *args):
        """Unlink every header, or only those named ``args[0]``."""
        if not args:
            header = self.__head
            while header is not None:
                next = header.next
                header.unlink()
                header = next
            self.__head = None
            self.__tail = None
            self.__values.clear()
        else:
            item = self.__values.pop(self.__make_key(args[0]), None)
            if item is not None:
                header = item[0]
            else:
                header = None
            while header is not None:
                next = header.next_value
                # Patch the global list endpoints if this node is one of them.
                if self.__head is header:
                    self.__head = header.next
                if self.__tail is header:
                    self.__tail = header.prev
                header.unlink()
                header = next

    def __append(self, name, value):
        """Append one name/value pair, linking it into both lists."""
        key = self.__make_key(name)
        if not isinstance(value, basestring):
            value = str(value)
        item = self.__values.get(key)
        if item is None:
            item = self.__values[key] = [None, None]
        header = Header(name, value, prev=self.__tail, prev_value=item[1])
        if header.prev is not None:
            header.prev.next = header
        if header.prev_value is not None:
            header.prev_value.next_value = header
        if item[0] is None:
            item[0] = header
        item[1] = header
        if self.__head is None:
            self.__head = header
        self.__tail = header

    def __getitem__(self, name):
        """Return all values for *name* joined by ', '; KeyError if absent."""
        if self.__make_key(name) not in self.__values:
            raise KeyError(name)
        return ', '.join(header.value for header in self.__iter(name))

    def __setitem__(self, name, value):
        """Replace *name*: value may be None (remove), scalar, or list/tuple."""
        self.__remove(name)
        if value is None:
            return
        elif isinstance(value, basestring):
            self.__append(name, value)
        elif not isinstance(value, (list, tuple)):
            self.__append(name, str(value))
        else:
            for value in value:
                self.__append(name, value)

    def __delitem__(self, name):
        if self.__make_key(name) not in self.__values:
            raise KeyError(name)
        self.__remove(name)

    def __iter__(self):
        # Yields original header names (including duplicates) in insertion order.
        for header in self.__iter():
            yield header.name

    def __contains__(self, name):
        return self.__make_key(name) in self.__values

    def iterkeys(self):
        for header in self.__iter():
            yield header.name

    def itervalues(self):
        for header in self.__iter():
            yield header.value

    def iteritems(self):
        for header in self.__iter():
            yield (header.name, header.value)

    def keys(self):
        return list(self.iterkeys())

    def values(self):
        return list(self.itervalues())

    def items(self):
        return list(self.iteritems())

    def getlist(self, name, default=nil):
        """Return the list of values for *name* ([] or *default* if absent)."""
        if self.__make_key(name) in self.__values:
            return [header.value for header in self.__iter(name)]
        if default is nil:
            return []
        return default

    def poplist(self, name, default=nil):
        """Remove and return the value list for *name*; KeyError/default if absent."""
        if self.__make_key(name) in self.__values:
            value = [header.value for header in self.__iter(name)]
            self.__remove(name)
            return value
        if default is nil:
            raise KeyError(name)
        return default

    def get(self, name, default=None):
        """Like __getitem__ but returning *default* instead of raising."""
        if self.__make_key(name) in self.__values:
            return ', '.join(self.getlist(name))
        return default

    def pop(self, name, default=nil):
        """Remove *name* and return its joined value; KeyError/default if absent."""
        if self.__make_key(name) in self.__values:
            return ', '.join(self.poplist(name))
        if default is nil:
            raise KeyError(name)
        return default

    def setdefault(self, name, value):
        """Store *value* under *name* if missing; return the joined value."""
        if self.__make_key(name) not in self.__values:
            self[name] = value
        if value is None:
            return None
        return ', '.join(self.getlist(name))

    def setdefaultlist(self, name, value):
        """Store *value* under *name* if missing; return the value list."""
        if self.__make_key(name) not in self.__values:
            self[name] = value
        if value is None:
            return None
        return self.getlist(name)

    def add(self, name, value):
        """Append value(s) for *name* without removing existing ones."""
        if value is None:
            return
        elif isinstance(value, basestring):
            self.__append(name, value)
        elif not isinstance(value, (list, tuple)):
            self.__append(name, str(value))
        else:
            for value in value:
                self.__append(name, value)

    def clear(self):
        self.__remove()

    def update(self, data=(), merge=False):
        """Update from a mapping or an iterable of (name, value) pairs.

        Unless *merge* is true, the first occurrence of each name removes
        the values previously stored for that name; later occurrences in
        the same call accumulate.
        """
        if hasattr(data, 'iteritems'):
            data = data.iteritems()
        seen = set()
        for name, value in data:
            key = self.__make_key(name)
            if key not in seen:
                if not merge:
                    self.__remove(name)
                seen.add(key)
            if value is None:
                continue
            elif isinstance(value, basestring):
                self.__append(name, value)
            elif not isinstance(value, (list, tuple)):
                self.__append(name, str(value))
            else:
                for value in value:
                    self.__append(name, value)

    def toLines(self, lines=None, canonical=False):
        """Append CRLF-terminated 'Name: value' wire lines to *lines*."""
        if lines is None:
            lines = []
        for name, value in self.iteritems():
            name = self.__make_text(name, canonical=canonical)
            value = self.__make_text(value)
            lines.append("%s: %s\r\n" % (name, value))
        return lines

    def toString(self, canonical=False):
        return ''.join(self.toLines(canonical=canonical))

    def __str__(self):
        return self.toString()

    def __repr__(self):
        return "Headers({%s})" % ', '.join("%r: %r" % (name, value) for (name, value) in self.iteritems())

    def parseClear(self):
        # Drop any partially accumulated (continuation) header line.
        self.__partialHeader = None

    def parseFlush(self):
        """Assemble the pending header (plus continuations) and store it."""
        if self.__partialHeader:
            header = '\r\n'.join(self.__partialHeader)
            self.__partialHeader = None
            parts = header.split(':', 1)
            if len(parts) != 2:
                raise HTTPDataError("header must be in 'name: value' format")
            name = parts[0].rstrip()
            value = parts[1].strip()
            if not name:
                raise HTTPDataError("header must be in 'name: value' format")
            self.add(name, value)

    def parseLine(self, line):
        """Feed one raw header line; a line starting with space/tab continues
        the previous header. Returns True while more lines are expected
        (i.e. *line* was non-empty)."""
        if not line or not line[0] in ' \t':
            self.parseFlush()
            if line:
                self.__partialHeader = [line]
        else:
            if self.__partialHeader:
                self.__partialHeader.append(line)
        return line and True or False
| {
"repo_name": "snaury/kitsu.http",
"path": "kitsu/http/headers.py",
"copies": "1",
"size": "10038",
"license": "mit",
"hash": 2147044326410370800,
"line_mean": 31.5909090909,
"line_max": 224,
"alpha_frac": 0.5152420801,
"autogenerated": false,
"ratio": 4.249788314987299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5265030395087299,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'HelpOperation',
'ListOperation',
'UploadOperation',
'DownloadOperation',
'InfoOperation',
'DeleteOperation',
]
from .help import HelpOperation
from .list import ListOperation
from .upload import UploadOperation
from .download import DownloadOperation
from .info import InfoOperation
from .delete import DeleteOperation
def make(command, group_id, args, options):
    """
    Generate operation class from arguments.

    :param command: command string ('list', 'upload', 'download', 'info', 'delete')
    :param group_id: group id the operation applies to
    :param args: list of argument
    :param options: dict of options
    :return: operation instance; raise AssertionError when failure
    """
    # Lazily-evaluated dispatch table: each entry only reads the options it
    # needs, and only when the matching command is actually requested.
    builders = {
        'list': lambda: ListOperation(group_id, args, options['output']),
        'upload': lambda: UploadOperation(group_id, args, options['force'], options['print_only']),
        'download': lambda: DownloadOperation(group_id, args, options['print_only']),
        'info': lambda: InfoOperation(group_id, args, options['output']),
        'delete': lambda: DeleteOperation(group_id, args, options['print_only']),
    }
    if command not in builders:
        raise AssertionError('Unknown command: %s' % command)
    return builders[command]()
| {
"repo_name": "mogproject/artifact-cli",
"path": "src/artifactcli/operation/__init__.py",
"copies": "1",
"size": "1169",
"license": "apache-2.0",
"hash": 1268293663820252400,
"line_mean": 32.4,
"line_max": 87,
"alpha_frac": 0.6852010265,
"autogenerated": false,
"ratio": 4.32962962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003246753246753247,
"num_lines": 35
} |
__all__ = [
'Hider',
'check_dependencies',
'choicify',
'get_key_by_value',
]
# Django imports
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from ..exceptions import DependencyError
class Hider(object):
    """Data descriptor that makes a class attribute inaccessible.

    Assign an instance of this class to an attribute to block both reads
    and writes of that attribute; either access raises AttributeError.
    Fixes a typo in the original read-path message ("attrbute").
    """
    def __get__(self, instance, owner):
        raise AttributeError("Hidden attribute")
    def __set__(self, obj, val):
        raise AttributeError("Hidden attribute")
def check_dependencies(dependencies, module):
    """
    Ensure dependencies of a module are listed in settings.INSTALLED_APPS

    :dependencies string | list: list of dependencies to check
    :module string: string representing the path to the current app
    :raises TypeError: if dependencies is neither a string nor a list
    :raises DependencyError: if a dependency is missing from INSTALLED_APPS
    """
    # isinstance() instead of exact type comparison: also accepts
    # str/list subclasses, which the original type(...) == ... rejected.
    if isinstance(dependencies, str):
        dependencies = [dependencies]
    elif not isinstance(dependencies, list):
        raise TypeError('dependencies argument must be of type list or string')
    for dependency in dependencies:
        if dependency not in settings.INSTALLED_APPS:
            raise DependencyError('%s depends on %s, which should be in settings.INSTALLED_APPS' % (module, dependency))
def choicify(dictionary):
    """
    Converts a readable python dictionary into a django model/form
    choice structure (list of tuples) ordered based on the values of each key

    :param dictionary: the dictionary to convert
    """
    # Order keys by their stored value, then emit (value, i18n label) pairs;
    # underscores in keys become spaces before translation.
    return [
        (dictionary[field], _(field.replace('_', ' ')))
        for field in sorted(dictionary, key=dictionary.get)
    ]
def get_key_by_value(dictionary, search_value):
    """
    searchs a value in a dicionary and returns the translated key of the
    first occurrence; returns None implicitly when the value is absent

    :param dictionary: dictionary to search in
    :param search_value: value to search for
    """
    # .items() instead of the Python-2-only .iteritems(): works on both
    # py2 and py3 mappings, with identical behaviour here.
    for key, value in dictionary.items():
        if value == search_value:
            return ugettext(key)
def pause_disconnectable_signals():
    """
    Disconnects non critical signals like notifications, websockets and stuff like that.
    Use when managing large chunks of nodes
    """
    # NOTE(review): DISCONNECTABLE_SIGNALS is neither defined nor imported in
    # this module, so calling this raises NameError — confirm where it should
    # come from. (This helper is also missing from __all__.)
    for signal in DISCONNECTABLE_SIGNALS:
        signal['disconnect']()
def resume_disconnectable_signals():
    """
    Reconnects non critical signals like notifications, websockets and stuff like that.
    Use when managing large chunks of nodes
    """
    # NOTE(review): same as pause_disconnectable_signals —
    # DISCONNECTABLE_SIGNALS is not defined in this module; verify its source.
    for signal in DISCONNECTABLE_SIGNALS:
        signal['reconnect']()
| {
"repo_name": "sololuz/cibb-web",
"path": "app/core/utils/misc.py",
"copies": "1",
"size": "2666",
"license": "bsd-3-clause",
"hash": 8136893400721822000,
"line_mean": 27.6666666667,
"line_max": 120,
"alpha_frac": 0.6762940735,
"autogenerated": false,
"ratio": 4.473154362416107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5649448435916107,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'HTML5TreeBuilder',
]
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
import html5lib
from html5lib.constants import DataLossWarning
import warnings
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    # Feature tags clients can select this builder by.
    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        """Return markup untouched; html5lib does its own encoding sniffing."""
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        return markup, None, None

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        """Parse *markup* with html5lib into self.soup via create_treebuilder."""
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, unicode):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

    def create_treebuilder(self, namespaceHTMLElements):
        """Factory handed to html5lib; keeps a handle on the builder."""
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that constructs a Beautiful Soup tree.

    NOTE(review): fragmentClass references ``BeautifulSoup`` and
    testSerializer references a module-level ``testSerializer`` — neither is
    imported/defined in the code visible here; confirm they exist in the
    full module.
    """
    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        if namespaceHTMLElements:
            # Namespaced elements cannot be represented; warn about data loss.
            warnings.warn("namespaceHTMLElements not supported yet",
                          DataLossWarning)
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        # Root node: the soup object itself, wrapped as an Element.
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        if namespace is not None:
            warnings.warn("BeautifulSoup cannot represent elements in any namespace", DataLossWarning)
        return Element(Tag(self.soup, self.soup.builder, name), self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # Replaces self.soup with a fresh document used as fragment root.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        self.soup.insert(len(self.soup.contents), node.element)

    def testSerializer(self, element):
        return testSerializer(element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like proxy over a tag's attributes, as html5lib expects.

    ``attrs`` is a snapshot taken at construction time: reads
    (__getitem__, keys, items, __contains__, __iter__) consult the
    snapshot, while writes (__setitem__) go through to the underlying
    element and do NOT refresh the snapshot.
    """
    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)
    def __iter__(self):
        return list(self.attrs.items()).__iter__()
    def __setitem__(self, name, value):
        # Removed a leftover no-op debug expression ("set attr", name, value)
        # that evaluated a throwaway tuple each call.
        self.element[name] = value
    def items(self):
        return list(self.attrs.items())
    def keys(self):
        return list(self.attrs.keys())
    def __getitem__(self, name):
        return self.attrs[name]
    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """html5lib Node adapter that mutates a Beautiful Soup Tag in place.

    NOTE(review): getNameTuple and reparentChildren use ``namespaces``,
    which is not imported in this copy of the module (only DataLossWarning
    is imported from html5lib.constants) — confirm against the full file,
    otherwise those paths raise NameError.
    """
    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def _nodeIndex(self, node, refNode):
        # Finds a node by identity rather than equality
        for index in range(len(self.element.contents)):
            if id(self.element.contents[index]) == id(refNode.element):
                return index
        return None

    def appendChild(self, node):
        """Append *node*; adjacent text nodes are merged into one string."""
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # Concatenate new text onto old text node
            # (TODO: This has O(n^2) performance, for input like "a</a>a</a>a</a>...")
            newStr = NavigableString(self.element.contents[-1]+node.element)

            # Remove the old text node
            # (Can't simply use .extract() by itself, because it fails if
            # an equal text node exists within the parent node)
            oldElement = self.element.contents[-1]
            del self.element.contents[-1]
            oldElement.parent = None
            oldElement.extract()

            self.element.insert(len(self.element.contents), newStr)
        else:
            self.element.insert(len(self.element.contents), node.element)
            node.parent = self

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and attributes != {}:
            for name, value in list(attributes.items()):
                self.element[name] = value
            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            # The Tag constructor calls this method automatically,
            # but html5lib creates a Tag object before setting up
            # the attributes.
            self.element.contains_substitutions = (
                self.soup.builder.set_up_substitutions(
                    self.element))
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        """Insert string *data*, optionally before the node *insertBefore*."""
        text = TextNode(NavigableString(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        """Insert *node* before *refNode*, merging adjacent text nodes."""
        index = self._nodeIndex(node, refNode)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            newStr = NavigableString(self.element.contents[index-1]+node.element)
            oldNode = self.element.contents[index-1]
            del self.element.contents[index-1]
            oldNode.parent = None
            oldNode.extract()

            self.element.insert(index-1, newStr)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        # Locate the child by identity within its parent, then detach it.
        index = self._nodeIndex(node.parent, node)
        del node.parent.element.contents[index]
        node.element.parent = None
        node.element.extract()
        node.parent = None

    def reparentChildren(self, newParent):
        """Move every child of this element into *newParent* (in order)."""
        while self.element.contents:
            child = self.element.contents[0]
            child.extract()
            if isinstance(child, Tag):
                newParent.appendChild(Element(child, self.soup, namespaces["html"]))
            else:
                newParent.appendChild(TextNode(child, self.soup))

    def cloneNode(self):
        """Shallow clone: same name/namespace and copied attributes, no children."""
        node = Element(Tag(self.soup, self.soup.builder, self.element.name), self.soup, self.namespace)
        for key,value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace == None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element wrapper for leaf string nodes (text or comments).

    Unlike Element, the underlying object has no tag name, so the
    html5lib Node base is initialized with None.
    """
    def __init__(self, element, soup):
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.soup = soup
        self.element = element

    def cloneNode(self):
        # The tree builder never clones bare text nodes.
        raise NotImplementedError
| {
"repo_name": "colinmcd94/kickdata",
"path": "lib/build/lib/bs4/builder/_html5lib.py",
"copies": "2",
"size": "8266",
"license": "apache-2.0",
"hash": -1977692613134710500,
"line_mean": 34.4763948498,
"line_max": 103,
"alpha_frac": 0.6283571256,
"autogenerated": false,
"ratio": 4.215196328403875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021907149231980456,
"num_lines": 233
} |
__all__ = [
'HTML5TreeBuilder',
]
from pdb import set_trace
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
NamespacedAttribute,
whitespace_re,
)
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    NAME = "html5lib"

    # Feature tags clients can select this builder by.
    features = [NAME, PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding,
                       document_declared_encoding=None, exclude_encodings=None):
        """Yield the single (markup, encoding, declared, replaced) attempt."""
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding

        # document_declared_encoding and exclude_encodings aren't used
        # ATM because the html5lib TreeBuilder doesn't use
        # UnicodeDammit.
        if exclude_encodings:
            warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        """Parse *markup* with html5lib into self.soup via create_treebuilder."""
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, str):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

    def create_treebuilder(self, namespaceHTMLElements):
        """Factory handed to html5lib; keeps a handle on the builder."""
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that constructs a Beautiful Soup tree.

    NOTE(review): fragmentClass references ``BeautifulSoup``, which is not
    imported in the code visible here — confirm against the full module.
    """
    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        # Root node: the soup object itself, wrapped as an Element.
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # Replaces self.soup with a fresh document used as fragment root.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like view over a tag's attributes for html5lib.

    Reads are served from a snapshot of the attributes taken at
    construction time; writes go straight through to the underlying
    element (and do not refresh the snapshot).
    """
    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return iter(list(self.attrs.items()))

    def __setitem__(self, name, value):
        # Multi-valued attributes (per the builder's cdata_list_attributes
        # table, globally or for this tag name) are stored as token lists.
        list_attr = HTML5TreeBuilder.cdata_list_attributes
        multi_valued = name in list_attr['*'] or (
            self.element.name in list_attr
            and name in list_attr[self.element.name])
        if multi_valued and not isinstance(value, list):
            # A node that is being cloned may already carry a list value.
            value = whitespace_re.split(value)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """html5lib Node adapter that mutates a Beautiful Soup Tag in place.

    ``element`` is the underlying bs4 Tag (or the soup document itself),
    ``soup`` the BeautifulSoup object used as node factory, ``namespace``
    the element's XML namespace or None.
    """
    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node):
        """Append *node* (Element, TextNode, Tag, or plain string).

        Adjacent text nodes are coalesced into a single NavigableString,
        as html5lib expects.
        """
        string_child = child = None
        if isinstance(node, str):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element

        if not isinstance(child, str) and child.parent is not None:
            # Detach the child from wherever it currently lives.
            node.element.extract()

        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, str):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            elif self.element.next_element is not None:
                # Something from further ahead in the parse tree is
                # being inserted into this earlier element. This is
                # very annoying because it means an expensive search
                # for the last element in the tree.
                most_recent_element = self.soup._last_descendant()
            else:
                most_recent_element = self.element
            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # Convert html5lib's (namespace, name) tuple keys into
            # NamespacedAttribute keys. (Also removed an unused
            # 'converted_attributes' local from the original.)
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value

            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in list(attributes.items()):
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        """Insert string *data*, optionally before node *insertBefore*.

        BUG FIX: the original constructed ``text`` and then passed the raw
        string *data* to insertBefore(), which dereferences node.element
        and fails on a str; pass the wrapped TextNode instead, matching
        the sibling implementation of this builder in this file.
        """
        if insertBefore:
            text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(data)

    def insertBefore(self, node, refNode):
        """Insert *node* before *refNode*, merging adjacent text nodes."""
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        append_after = new_parent_element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            if new_parents_last_descendant:
                first_child.previous_element = new_parents_last_descendant
            else:
                first_child.previous_element = new_parent_element
            first_child.previous_sibling = new_parents_last_child
            if new_parents_last_descendant:
                new_parents_last_descendant.next_element = first_child
            else:
                new_parent_element.next_element = first_child
            if new_parents_last_child:
                new_parents_last_child.next_sibling = first_child

            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            if new_parents_last_descendant_next_element:
                new_parents_last_descendant_next_element.previous_element = last_child
            last_child.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

    def cloneNode(self):
        """Shallow clone: same name/namespace and copied attributes, no children."""
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key,value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element wrapper for leaf string nodes (text or comments).

    Unlike Element, the underlying object has no tag name, so the
    html5lib Node base is initialized with None.
    """
    def __init__(self, element, soup):
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.soup = soup
        self.element = element

    def cloneNode(self):
        # The tree builder never clones bare text nodes.
        raise NotImplementedError
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/lib/htmlbeautifier/bs4/builder/_html5lib.py",
"copies": "15",
"size": "12768",
"license": "mit",
"hash": 4580490675864824300,
"line_mean": 37.4578313253,
"line_max": 159,
"alpha_frac": 0.6184993734,
"autogenerated": false,
"ratio": 4.280254777070064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016129484459347612,
"num_lines": 332
} |
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from argeweb.libs.bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from argeweb.libs.bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from argeweb.libs.bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    # Feature tags clients can select this builder by.
    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        """Yield the single (markup, encoding, declared, replaced) attempt."""
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        """Parse *markup* with html5lib into self.soup via create_treebuilder."""
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, unicode):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

    def create_treebuilder(self, namespaceHTMLElements):
        """Factory handed to html5lib; keeps a handle on the builder."""
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that constructs a Beautiful Soup tree.

    NOTE(review): fragmentClass references ``BeautifulSoup``, which is not
    imported in the code visible here — confirm against the full module.
    """
    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        # Root node: the soup object itself, wrapped as an Element.
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # Replaces self.soup with a fresh document used as fragment root.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like proxy over a tag's attributes, as html5lib expects.

    ``attrs`` is a snapshot taken at construction time: reads
    (__getitem__, keys, items, __len__, __contains__, __iter__) consult
    the snapshot, while writes (__setitem__) go through to the underlying
    element and do NOT refresh the snapshot.
    """
    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)
    def __iter__(self):
        return list(self.attrs.items()).__iter__()
    def __setitem__(self, name, value):
        # Removed a leftover no-op debug expression ("set attr", name, value)
        # that evaluated a throwaway tuple each call.
        self.element[name] = value
    def items(self):
        return list(self.attrs.items())
    def keys(self):
        return list(self.attrs.keys())
    def __len__(self):
        return len(self.attrs)
    def __getitem__(self, name):
        return self.attrs[name]
    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """An html5lib Node that proxies a Beautiful Soup Tag.

    html5lib drives the parse and calls these methods; each call is
    translated into the corresponding mutation of the underlying
    Beautiful Soup tree.
    """

    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element  # the wrapped Beautiful Soup Tag
        self.soup = soup        # the BeautifulSoup document being built
        self.namespace = namespace

    def appendChild(self, node):
        """Append `node` (an Element, TextNode, Tag, or bare string)."""
        string_child = child = None
        if isinstance(node, basestring):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element

        if not isinstance(child, basestring) and child.parent is not None:
            # Detach the node from its old location first.
            # NOTE(review): when `node` is a bare Tag this reads
            # `node.element`, which a Tag does not have — looks like a
            # latent bug inherited from upstream; confirm before changing.
            node.element.extract()

        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, basestring):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            else:
                most_recent_element = self.element
            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # Convert tuple attribute names into NamespacedAttributes.
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in attributes.items():
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        """Insert string `data`, optionally before node `insertBefore`.

        BUG FIX: the original built a TextNode but then passed the raw
        string `data` to insertBefore(), which dereferences `.element`
        on its argument and would raise AttributeError. Pass the
        TextNode in both branches instead.
        """
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            first_child.previous_element = new_parents_last_descendant
            first_child.previous_sibling = new_parents_last_child

            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            last_child.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

    def cloneNode(self):
        """Return a copy: a new Tag with the same name and attributes."""
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element wrapper around a bare text-like node (no tag name)."""

    def __init__(self, element, soup):
        # Skip Element.__init__ on purpose: a text node has no name.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.soup = soup
        self.element = element

    def cloneNode(self):
        raise NotImplementedError
| {
"repo_name": "argeweb/start",
"path": "argeweb/libs/bs4/builder/_html5lib.py",
"copies": "1",
"size": "10686",
"license": "mit",
"hash": -3433142352263756300,
"line_mean": 36.4947368421,
"line_max": 159,
"alpha_frac": 0.6231517874,
"autogenerated": false,
"ratio": 4.2270569620253164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5350208749425316,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Tree builder that delegates the actual parsing to html5lib."""

    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        # Remember the caller's encoding hint; feed() hands it to html5lib.
        self.user_specified_encoding = user_specified_encoding
        return markup, None, None, False

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)
        # Record the encoding the tokenizer detected. html5lib reports
        # UTF-8 for Unicode input, which is not meaningful, so report
        # no encoding at all in that case.
        doc.original_encoding = (
            None if isinstance(markup, str)
            else parser.tokenizer.stream.charEncoding[0])

    def create_treebuilder(self, namespaceHTMLElements):
        builder = TreeBuilderForHtml5lib(self.soup, namespaceHTMLElements)
        self.underlying_builder = builder
        return builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that assembles a Beautiful Soup tree."""

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # BUG FIX: BeautifulSoup was referenced without being imported
        # anywhere in this module, so fragment parsing raised NameError.
        # Import locally to avoid a circular import at module load time.
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like view of a tag's attributes, as html5lib expects.

    Reads come from a snapshot of the attributes taken at construction
    time; writes go straight through to the underlying tag.
    """

    def __init__(self, element):
        self.element = element                  # the underlying tag
        self.attrs = dict(self.element.attrs)   # snapshot for reads

    def __iter__(self):
        return iter(list(self.attrs.items()))

    def __setitem__(self, name, value):
        # Write through to the tag itself (the snapshot is not updated).
        # (The original had a leftover no-op debug expression here.)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        # Test the dict directly instead of materializing the key list.
        return name in self.attrs
class Element(html5lib.treebuilders._base.Node):
    """An html5lib Node that proxies a Beautiful Soup Tag."""

    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element  # the wrapped Beautiful Soup Tag
        self.soup = soup        # the BeautifulSoup document being built
        self.namespace = namespace

    def appendChild(self, node):
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # Concatenate new text onto old text node
            # XXX This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + node.element)
            old_element.replace_with(new_element)
        else:
            self.element.append(node.element)
            node.parent = self

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # Convert tuple attribute names into NamespacedAttributes.
            # (Removed an unused `converted_attributes` local.)
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in list(attributes.items()):
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, newParent):
        # Extract each child in turn and re-append it under newParent,
        # wrapping Tags and strings in the node types html5lib expects.
        while self.element.contents:
            child = self.element.contents[0]
            child.extract()
            if isinstance(child, Tag):
                newParent.appendChild(
                    Element(child, self.soup, namespaces["html"]))
            else:
                newParent.appendChild(
                    TextNode(child, self.soup))

    def cloneNode(self):
        """Return a copy: a new Tag with the same name and attributes."""
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element wrapper around a bare text-like node (no tag name)."""

    def __init__(self, element, soup):
        # Skip Element.__init__ on purpose: a text node has no name.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.soup = soup
        self.element = element

    def cloneNode(self):
        raise NotImplementedError
| {
"repo_name": "kevlar1818/mipster",
"path": "bs4/builder/_html5lib.py",
"copies": "2",
"size": "7747",
"license": "mit",
"hash": -6580460414988890000,
"line_mean": 33.8963963964,
"line_max": 159,
"alpha_frac": 0.6223054085,
"autogenerated": false,
"ratio": 4.156115879828326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002121763311189617,
"num_lines": 222
} |
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from ..builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from ..element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from ..element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Tree builder that delegates the actual parsing to html5lib."""

    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        # Remember the caller's encoding hint; feed() hands it to html5lib.
        self.user_specified_encoding = user_specified_encoding
        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)
        # Record the encoding the tokenizer detected. html5lib reports
        # UTF-8 for Unicode input, which is not meaningful, so report
        # no encoding at all in that case.
        doc.original_encoding = (
            None if isinstance(markup, unicode)
            else parser.tokenizer.stream.charEncoding[0])

    def create_treebuilder(self, namespaceHTMLElements):
        builder = TreeBuilderForHtml5lib(self.soup, namespaceHTMLElements)
        self.underlying_builder = builder
        return builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that assembles a Beautiful Soup tree."""

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # BUG FIX: BeautifulSoup was referenced without being imported
        # anywhere in this module, so fragment parsing raised NameError.
        # Import it relatively (this module uses relative imports) and
        # locally, to avoid a circular import at module load time.
        from .. import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like view of a tag's attributes, as html5lib expects.

    Reads come from a snapshot of the attributes taken at construction
    time; writes go straight through to the underlying tag.
    """

    def __init__(self, element):
        self.element = element                  # the underlying tag
        self.attrs = dict(self.element.attrs)   # snapshot for reads

    def __iter__(self):
        return iter(list(self.attrs.items()))

    def __setitem__(self, name, value):
        # Write through to the tag itself (the snapshot is not updated).
        # (The original had a leftover no-op debug expression here.)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        # Test the dict directly instead of materializing the key list.
        return name in self.attrs
class Element(html5lib.treebuilders._base.Node):
    """An html5lib Node that proxies a Beautiful Soup Tag.

    html5lib drives the parse and calls these methods; each call is
    translated into the corresponding mutation of the underlying
    Beautiful Soup tree.
    """

    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element  # the wrapped Beautiful Soup Tag
        self.soup = soup        # the BeautifulSoup document being built
        self.namespace = namespace

    def appendChild(self, node):
        """Append `node` (an Element, TextNode, Tag, or bare string)."""
        string_child = child = None
        if isinstance(node, basestring):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element

        if not isinstance(child, basestring) and child.parent is not None:
            # Detach the node from its old location first.
            # NOTE(review): when `node` is a bare Tag this reads
            # `node.element`, which a Tag does not have — looks like a
            # latent bug inherited from upstream; confirm before changing.
            node.element.extract()

        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, basestring):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            else:
                most_recent_element = self.element
            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # Convert tuple attribute names into NamespacedAttributes.
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in attributes.items():
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        """Insert string `data`, optionally before node `insertBefore`.

        BUG FIX: the original built a TextNode but then passed the raw
        string `data` to insertBefore(), which dereferences `.element`
        on its argument and would raise AttributeError. Pass the
        TextNode in both branches instead.
        """
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            first_child.previous_element = new_parents_last_descendant
            first_child.previous_sibling = new_parents_last_child

            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            last_child.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

    def cloneNode(self):
        """Return a copy: a new Tag with the same name and attributes."""
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element wrapper around a bare text-like node (no tag name)."""

    def __init__(self, element, soup):
        # Skip Element.__init__ on purpose: a text node has no name.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.soup = soup
        self.element = element

    def cloneNode(self):
        raise NotImplementedError
| {
"repo_name": "yongshengwang/builthue",
"path": "desktop/core/ext-py/requests-2.0.0/requests/packages/bs4/builder/_html5lib.py",
"copies": "2",
"size": "10641",
"license": "apache-2.0",
"hash": -7843711672085510000,
"line_mean": 36.3368421053,
"line_max": 159,
"alpha_frac": 0.6218400526,
"autogenerated": false,
"ratio": 4.249600638977636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017694947753963556,
"num_lines": 285
} |
__all__ = (
'HTTPException',
'HTTPError',
'HTTPRedirection',
'HTTPSuccessful',
'HTTPOk',
'HTTPCreated',
'HTTPAccepted',
'HTTPNonAuthoritativeInformation',
'HTTPNoContent',
'HTTPResetContent',
'HTTPPartialContent',
'HTTPMultipleChoices',
'HTTPMovedPermanently',
'HTTPFound',
'HTTPSeeOther',
'HTTPNotModified',
'HTTPUseProxy',
'HTTPTemporaryRedirect',
'HTTPClientError',
'HTTPBadRequest',
'HTTPUnauthorized',
'HTTPPaymentRequired',
'HTTPForbidden',
'HTTPNotFound',
'HTTPMethodNotAllowed',
'HTTPNotAcceptable',
'HTTPProxyAuthenticationRequired',
'HTTPRequestTimeout',
'HTTPConflict',
'HTTPGone',
'HTTPLengthRequired',
'HTTPPreconditionFailed',
'HTTPRequestEntityTooLarge',
'HTTPRequestURITooLong',
'HTTPUnsupportedMediaType',
'HTTPRequestRangeNotSatisfiable',
'HTTPExpectationFailed',
'HTTPServerError',
'HTTPInternalServerError',
'HTTPNotImplemented',
'HTTPBadGateway',
'HTTPServiceUnavailable',
'HTTPGatewayTimeout',
'HTTPVersionNotSupported',
)
from aio2py.required.aiohttp.web_reqrep import Response
############################################################
# HTTP Exceptions
############################################################
class HTTPException(Response, Exception):
    """A Response that can also be raised as an exception.

    Subclasses must set `status_code`.
    """

    # You should set in subclasses:
    # status = 200

    status_code = None

    def __init__(self, *, headers=None, reason=None,
                 body=None, text=None, content_type=None):
        # Initialize the Response half first; Response.__init__ is
        # expected to set self.reason, which the Exception half then
        # uses as its message — so the order of these two calls matters.
        Response.__init__(self, status=self.status_code,
                          headers=headers, reason=reason,
                          body=body, text=text, content_type=content_type)
        Exception.__init__(self, self.reason)
        if self.body is None:
            # Default body, e.g. "404: Not Found".
            self.text = "{}: {}".format(self.status, self.reason)
class HTTPError(HTTPException):
    """Base class for exceptions with status codes in the 400s and 500s."""
class HTTPRedirection(HTTPException):
    """Base class for exceptions with status codes in the 300s."""
class HTTPSuccessful(HTTPException):
    """Base class for exceptions with status codes in the 200s."""
# 2xx: successful responses.
class HTTPOk(HTTPSuccessful):
    status_code = 200
class HTTPCreated(HTTPSuccessful):
    status_code = 201
class HTTPAccepted(HTTPSuccessful):
    status_code = 202
class HTTPNonAuthoritativeInformation(HTTPSuccessful):
    status_code = 203
class HTTPNoContent(HTTPSuccessful):
    status_code = 204
class HTTPResetContent(HTTPSuccessful):
    status_code = 205
class HTTPPartialContent(HTTPSuccessful):
    status_code = 206
############################################################
# 3xx redirection
############################################################
class _HTTPMove(HTTPRedirection):
    """Base for 3xx responses that carry a Location header.

    Raises ValueError when `location` is empty/falsy.
    """

    def __init__(self, location, *, headers=None, reason=None,
                 body=None, text=None, content_type=None):
        if not location:
            raise ValueError("HTTP redirects need a location to redirect to.")
        super().__init__(headers=headers, reason=reason,
                         body=body, text=text, content_type=content_type)
        # Expose the target both as a header and as an attribute.
        self.headers['Location'] = location
        self.location = location
# 3xx: redirection responses.
class HTTPMultipleChoices(_HTTPMove):
    status_code = 300
class HTTPMovedPermanently(_HTTPMove):
    status_code = 301
class HTTPFound(_HTTPMove):
    status_code = 302
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(_HTTPMove):
    status_code = 303
class HTTPNotModified(HTTPRedirection):
    # FIXME: this should include a date or etag header
    status_code = 304
class HTTPUseProxy(_HTTPMove):
    # Not a move, but looks a little like one
    status_code = 305
class HTTPTemporaryRedirect(_HTTPMove):
    status_code = 307
############################################################
# 4xx client error
############################################################
# 4xx: client error responses.
class HTTPClientError(HTTPError):
    pass
class HTTPBadRequest(HTTPClientError):
    status_code = 400
class HTTPUnauthorized(HTTPClientError):
    status_code = 401
class HTTPPaymentRequired(HTTPClientError):
    status_code = 402
class HTTPForbidden(HTTPClientError):
    status_code = 403
class HTTPNotFound(HTTPClientError):
    status_code = 404
class HTTPMethodNotAllowed(HTTPClientError):
    status_code = 405
    def __init__(self, method, allowed_methods, *, headers=None, reason=None,
                 body=None, text=None, content_type=None):
        # 405 responses must advertise the permitted methods in an
        # Allow header; sort for a deterministic header value.
        allow = ','.join(sorted(allowed_methods))
        super().__init__(headers=headers, reason=reason,
                         body=body, text=text, content_type=content_type)
        self.headers['Allow'] = allow
        self.allowed_methods = allowed_methods
        self.method = method.upper()
class HTTPNotAcceptable(HTTPClientError):
    status_code = 406
class HTTPProxyAuthenticationRequired(HTTPClientError):
    status_code = 407
class HTTPRequestTimeout(HTTPClientError):
    status_code = 408
class HTTPConflict(HTTPClientError):
    status_code = 409
class HTTPGone(HTTPClientError):
    status_code = 410
class HTTPLengthRequired(HTTPClientError):
    status_code = 411
class HTTPPreconditionFailed(HTTPClientError):
    status_code = 412
class HTTPRequestEntityTooLarge(HTTPClientError):
    status_code = 413
class HTTPRequestURITooLong(HTTPClientError):
    status_code = 414
class HTTPUnsupportedMediaType(HTTPClientError):
    status_code = 415
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
    status_code = 416
class HTTPExpectationFailed(HTTPClientError):
    status_code = 417
############################################################
# 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
# 5xx: server error responses.
class HTTPServerError(HTTPError):
    pass
class HTTPInternalServerError(HTTPServerError):
    status_code = 500
class HTTPNotImplemented(HTTPServerError):
    status_code = 501
class HTTPBadGateway(HTTPServerError):
    status_code = 502
class HTTPServiceUnavailable(HTTPServerError):
    status_code = 503
class HTTPGatewayTimeout(HTTPServerError):
    status_code = 504
class HTTPVersionNotSupported(HTTPServerError):
    status_code = 505
| {
"repo_name": "lfblogs/aio2py",
"path": "aio2py/required/aiohttp/web_exceptions.py",
"copies": "1",
"size": "6801",
"license": "apache-2.0",
"hash": -6364510862985858000,
"line_mean": 22.6145833333,
"line_max": 78,
"alpha_frac": 0.6497573886,
"autogenerated": false,
"ratio": 4.334608030592734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 288
} |
__all__ = [
'HttpMetricApi',
]
import logging
import time
from dogapi.constants import MetricType
logger = logging.getLogger('dd.dogapi')
class HttpMetricApi(object):
    """Mixin that submits metric points through the HTTP metrics API."""

    default_metric_type = MetricType.Gauge

    def metric(self, name, points, host=None, device=None, tags=None, metric_type=MetricType.Gauge):
        """
        Submit a point or series of *points* to the metric API, optionally
        scoping the metric to a *host* or *device*. *points* may be a bare
        value, a ``(timestamp, value)`` tuple, or a list of such tuples.

        >>> dog_http_api.metric('my.value', 123.4, host="my.custom.host")
        >>> dog_http_api.metric('my.pair', (1317652676, 15), device="eth0")
        >>> dog_http_api.metric('my.series', [(1317652676, 15), (1317652800, 16)])
        """
        if host is None:
            host = self._default_host

        # Normalize the accepted shorthands into a list of pairs.
        now = time.time()
        if isinstance(points, (float, int)):
            points = [(now, points)]       # bare value: stamp it with "now"
        elif isinstance(points, tuple):
            points = [points]              # single (ts, value) pair

        series = {
            'metric': name,
            'points': [[ts, val] for ts, val in points],
            'type': metric_type,
            'host': host,
            'device': device,
            'tags': tags,
        }
        return self.metrics([series])

    def metrics(self, metrics):
        """
        Submit a list of *metrics*, each a dict with ``metric`` and
        ``points`` keys and optional ``host``/``device`` scoping keys.

        >>> dog_http_api.metrics([{'metric':'my.metric', 'points':[(1317652676, 15)]}])
        """
        logger.debug("Submitting metrics to the api")
        return self._submit_metrics(metrics)

    def _submit_metrics(self, metrics):
        # Single POST carrying the whole series payload.
        logger.debug("flushing metrics over http.")
        payload = {"series": metrics}
        return self.http_request('POST', '/series', payload)
| {
"repo_name": "DataDog/dogapi",
"path": "src/dogapi/http/metrics.py",
"copies": "1",
"size": "1967",
"license": "bsd-3-clause",
"hash": -5863638129847229000,
"line_mean": 31.7833333333,
"line_max": 108,
"alpha_frac": 0.5871886121,
"autogenerated": false,
"ratio": 3.8568627450980393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49440513571980393,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'HttpServer',
]
import logging
import socket
import ssl
import sys
from . import wsgi
logging.getLogger(__name__).addHandler(logging.NullHandler())
VERSION = '%s/v1' % __name__
class HttpServer:
    """Per-connection handler that serves a WSGI application over HTTP/1.1."""

    def __init__(self, server_socket, application):
        address = server_socket.getsockname()
        scheme = (
            'https' if isinstance(server_socket.target, ssl.SSLSocket)
            else 'http')
        # Template environ shared by all connections; per-connection
        # fields are filled in by __call__.
        self._base_environ = {
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': scheme,
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.file_wrapper': wsgi.FileWrapper,
            # Should we wrap sys.stderr in an async adapter?
            'wsgi.errors': sys.stderr,
            'SERVER_SOFTWARE': VERSION,
            'SERVER_NAME': socket.getfqdn(address[0]),
            'SERVER_PORT': address[1],
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'SCRIPT_NAME': '',
        }
        self._application = application

    async def __call__(self, sock, address):
        # Copy the template and add the peer's address for this connection.
        base_environ = dict(self._base_environ)
        base_environ['REMOTE_ADDR'] = address[0]
        base_environ['REMOTE_PORT'] = address[1]
        session = wsgi.HttpSession(sock, self._application, base_environ)
        return await session()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/http1_servers/g1/http/http1_servers/__init__.py",
"copies": "1",
"size": "1331",
"license": "mit",
"hash": 7398082729926357000,
"line_mean": 29.25,
"line_max": 73,
"alpha_frac": 0.5785123967,
"autogenerated": false,
"ratio": 3.846820809248555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9925333205948554,
"avg_score": 0,
"num_lines": 44
} |
__all__ = [
'HttpServer',
]
import logging
import socket
import ssl
from . import nghttp2 as ng
from . import wsgi
logging.getLogger(__name__).addHandler(logging.NullHandler())
VERSION = '%s/nghttp2=%s' % (
__name__,
# pylint: disable=no-member
ng.F.nghttp2_version(0).contents.version_str.decode('utf-8'),
)
class HttpServer:
    """Per-connection handler that serves a WSGI application over HTTP/2."""

    def __init__(self, server_socket, application):
        address = server_socket.getsockname()
        scheme = (
            'https' if isinstance(server_socket.target, ssl.SSLSocket)
            else 'http')
        # Template environ shared by all connections; per-connection
        # fields are filled in by __call__.
        self._environ = {
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': scheme,
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'SERVER_SOFTWARE': VERSION,
            'SERVER_NAME': socket.getfqdn(address[0]),
            'SERVER_PORT': address[1],
            'SERVER_PROTOCOL': 'HTTP/2.0',
        }
        self._application = application

    async def __call__(self, client_socket, address):
        # Copy the template and add the peer's address for this connection.
        environ = dict(self._environ)
        environ['REMOTE_ADDR'] = address[0]
        environ['REMOTE_PORT'] = address[1]
        session = wsgi.HttpSession(
            client_socket, address, self._application, environ
        )
        return await session.serve()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/http2_servers/g1/http/http2_servers/__init__.py",
"copies": "1",
"size": "1306",
"license": "mit",
"hash": 8414811375016516000,
"line_mean": 27.3913043478,
"line_max": 65,
"alpha_frac": 0.5842266462,
"autogenerated": false,
"ratio": 3.7207977207977208,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48050243669977205,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'IdentityDecoder',
'ChunkedDecoder',
'DeflateDecoder',
'CompoundDecoder',
]
from kitsu.http.errors import *
from kitsu.http.headers import *
from kitsu.http.parsers import *
class IdentityDecoder(Parser):
    """Decoder for an un-encoded ("identity") HTTP body.

    `length` is the expected body size in bytes, or None to treat all
    incoming data as body (read-until-close).
    """

    def __init__(self, length=None):
        self.length = length  # bytes still expected; None means unbounded

    def parseRaw(self, data):
        if self.length is None:
            # Unbounded body: everything received is content.
            body, data = data, ''
        elif self.length:
            # Consume at most `length` bytes; the remainder presumably
            # belongs to the next message — TODO confirm against Parser.
            body, data = data[:self.length], data[self.length:]
            self.length -= len(body)
            if self.length == 0:
                self.done = True
        else:
            # length == 0: body already complete; consume nothing.
            body = ''
        if data:
            # Hand unconsumed bytes back via the Parser framework.
            self.prepend(data)
        if body:
            return (body,)
        return ()

    def finish(self):
        # Called at end of input; raises if the body was truncated.
        if not self.done:
            self.done = True
            if self.length:
                raise HTTPDataError("not enough data for content body")
        return ()
class ChunkedDecoder(LineParser):
    """Decode a 'chunked' transfer-encoded body.

    Alternates between line mode (chunk-size lines, trailer headers) and
    data mode (chunk payloads).  Trailer headers, if any, are emitted as
    a final Headers object.
    """

    def __init__(self):
        self.length = None      # remaining bytes of the current chunk
        self.extensions = None  # chunk-extension string of the last chunk header
        self.headers = None     # trailer Headers; non-None once last chunk seen

    def parseLine(self, line):
        if self.headers is not None:
            # Reading trailer headers; parseLine returns falsy at the end.
            if not self.headers.parseLine(line):
                self.done = True
                return (self.headers,)
        elif self.length == 0:
            # Just finished reading chunk data; expect the empty CRLF line.
            if line:
                raise HTTPDataError("chunk data must end with '\\r\\n'")
            self.length = None
        else:
            # Reading chunk header: "<hex-size>[;extensions]".
            parts = line.split(';', 1)
            length = int(parts[0], 16)
            if len(parts) >= 2:
                # BUG FIX: the extensions are the text after ';' (parts[1]);
                # the old code re-used parts[0], the hex size field.
                extensions = parts[1].strip()
            else:
                extensions = None
            self.length = length
            self.extensions = extensions
            if self.length == 0:
                # Last chunk: start reading trailer headers.
                self.headers = Headers()
            else:
                # Start reading chunk data.
                self.setDataMode()
        return ()

    def parseData(self, data):
        body, data = data[:self.length], data[self.length:]
        self.length -= len(body)
        if self.length == 0:
            # Chunk complete: leftover bytes go back to line mode.
            self.setLineMode(data)
        return (body,)

    def finish(self):
        # A chunked body is only complete after the terminating 0-chunk
        # and trailers; anything else is a truncated stream.
        if not self.done:
            self.done = True
            raise HTTPDataError("not enough data for chunked body")
        return ()
class DeflateDecoder(Parser):
    """Streaming zlib decoder for the 'deflate' transfer coding."""

    def __init__(self):
        from zlib import decompressobj
        self.obj = decompressobj()

    def parseRaw(self, data):
        decompressed = self.obj.decompress(data)
        return (decompressed,) if decompressed else ()

    def finish(self):
        if self.done:
            return ()
        self.done = True
        tail = self.obj.flush()
        # Bytes past the compressed stream belong to the next parser.
        self.prepend(self.obj.unused_data)
        self.obj = None
        return (tail,) if tail else ()
class CompoundDecoder(Parser):
    """Chain several decoders: output of each decoder is fed as input to
    the next.  Non-string items (e.g. trailer Headers) pass through the
    chain untouched.  Completion is driven by the first (outermost)
    decoder.
    """

    def __init__(self, *args):
        # Decoders ordered outermost-first.
        self.decoders = list(args)

    def _process(self, chunks, finish=False):
        # Push `chunks` through every decoder in order; optionally chain
        # finish() calls as well.
        first = self.decoders[0]
        for decoder in self.decoders:
            output = []
            for chunk in chunks:
                if isinstance(chunk, basestring):  # NOTE: Python 2 code
                    output.extend(decoder.parse(chunk))
                else:
                    # Non-string item (e.g. Headers): forward unchanged.
                    output.append(chunk)
            if finish:
                output.extend(decoder.finish())
            if decoder is first:
                # Only the outermost decoder decides overall completion.
                self.done = self.done or decoder.done
            chunks = output
        return chunks

    def clear(self):
        # Unconsumed raw input lives in the outermost decoder's buffer.
        return self.decoders[0].clear()

    def parseRaw(self, data):
        result = self._process((data,))
        if self.done:
            # Outer decoder finished
            # Chain finish calls
            result = list(result)
            result.extend(self._process((), True))
        return result

    def finish(self):
        if not self.done:
            self.done = True
            return self._process((), True)
        return ()
# NOTE(review): the lines below use `cls` and `@classmethod`, so they look
# like the body of a class whose header was lost in extraction — confirm
# against the original decoders.py.
requestMethodsWithoutBody = frozenset(('HEAD', 'CONNECT'))
responseCodesWithoutBody = frozenset((204, 304))

@classmethod
def from_response(cls, request, response):
    """Build the decoder chain for *response* to *request*.

    Returns a decoder instance, or None when the message can carry no
    body (explicit zero Content-Length, bodyless request method, or a
    bodyless status code).  Raises HTTPDataError on malformed headers or
    unsupported transfer codings.
    """
    # process Content-Length
    if getattr(request, 'ignore_content_length', False):
        contentLength = None
    else:
        contentLength = response.headers.getlist('Content-Length')
        if contentLength:
            # Multiple Content-Length headers: the last one wins.
            contentLength = contentLength[-1]
            if contentLength:
                try:
                    contentLength = int(contentLength)
                except ValueError:
                    raise HTTPDataError("invalid Content-Length header")
            else:
                contentLength = None
        else:
            contentLength = None
    if request.method in cls.requestMethodsWithoutBody:
        contentLength = 0
    if contentLength is None and response.code in cls.responseCodesWithoutBody:
        contentLength = 0
    if contentLength == 0:
        # No body at all: the caller gets no decoder.
        return None
    # process Transfer-Encoding
    encodings = response.headers.get('Transfer-Encoding')
    if encodings is None:
        encodings = 'identity'
    # Codings are applied innermost-last in the header; reverse so the
    # base (chunked/identity) coding ends up first in the chain.
    encodings = [encoding.strip() for encoding in encodings.split(',')]
    encodings.reverse()
    decoders = []
    baseDecoderFound = False
    for encoding in encodings:
        encoding = encoding.split(';', 1)[0] # strip parameters
        encoding = encoding.strip().lower()
        if encoding == 'chunked':
            if decoders:
                raise HTTPDataError("'chunked' must be the last Transfer-Encoding in chain")
            decoders.append(ChunkedDecoder())
            baseDecoderFound = True
        elif encoding == 'identity':
            if decoders:
                raise HTTPDataError("'identity' must be the last Transfer-Encoding in chain")
            decoders.append(IdentityDecoder(contentLength))
            baseDecoderFound = True
        elif encoding == 'deflate':
            decoders.append(DeflateDecoder())
        else:
            # TODO: implement gzip, bzip2?
            raise HTTPDataError("no decoder for Transfer-Encoding %r" % (encoding,))
    if not baseDecoderFound:
        # Don't fail if identity not specified
        decoders.insert(0, IdentityDecoder(contentLength))
    return cls(*decoders)
| {
"repo_name": "snaury/kitsu.http",
"path": "kitsu/http/decoders.py",
"copies": "1",
"size": "6611",
"license": "mit",
"hash": 8266131573049647000,
"line_mean": 31.5665024631,
"line_max": 97,
"alpha_frac": 0.5315383452,
"autogenerated": false,
"ratio": 4.7664023071377075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5797940652337708,
"avg_score": null,
"num_lines": null
} |
# Public exception hierarchy of this package.
__all__ = [
    'ImproperlyConfigured', 'ElasticsearchException', 'SerializationError',
    'TransportError', 'NotFoundError', 'ConflictError', 'RequestError', 'ConnectionError',
    'SSLError', 'ConnectionTimeout', 'AuthenticationException', 'AuthorizationException'
]
class ImproperlyConfigured(Exception):
    """Raised when the configuration passed to the client is
    inconsistent or invalid."""
class ElasticsearchException(Exception):
    """Root of the exceptions raised by this package's operations.

    ImproperlyConfigured is deliberately kept outside this hierarchy.
    """
class SerializationError(ElasticsearchException):
    """Raised when the configured ``Serializer`` cannot serialize or
    deserialize the data passed in."""
class TransportError(ElasticsearchException):
    """Raised for non-OK (>=400) HTTP responses from Elasticsearch, and
    for actual connection failures (in which case ``status_code`` is the
    string ``'N/A'``).

    The positional args carry (status_code, error, info).
    """

    @property
    def status_code(self):
        """HTTP status of the offending response, or ``'N/A'``."""
        return self.args[0]

    @property
    def error(self):
        """Short string error message."""
        return self.args[1]

    @property
    def info(self):
        """Dict of error details returned by ES, where available."""
        return self.args[2]

    def __str__(self):
        suffix = ''
        try:
            # Surface the first root cause's reason when ES provided one.
            if self.info:
                suffix = ', %r' % self.info['error']['root_cause'][0]['reason']
        except LookupError:
            # Missing args[2] or missing keys: omit the cause.
            pass
        return 'TransportError(%s, %r%s)' % (self.status_code, self.error, suffix)
class ConnectionError(TransportError):
    """Raised when talking to ES fails at the connection level.

    The original exception from the underlying Connection
    implementation is available as ``.info``.
    """

    def __str__(self):
        origin = self.info
        return 'ConnectionError(%s) caused by: %s(%s)' % (
            self.error, origin.__class__.__name__, origin)
class SSLError(ConnectionError):
    """Connection error caused by an SSL/TLS failure."""
class ConnectionTimeout(ConnectionError):
    """A network timeout; does not cause a node retry by default."""

    def __str__(self):
        origin = self.info
        return 'ConnectionTimeout caused by - %s(%s)' % (
            origin.__class__.__name__, origin)
class NotFoundError(TransportError):
    """HTTP 404 -- the requested resource does not exist."""


class ConflictError(TransportError):
    """HTTP 409 -- version or document conflict."""


class RequestError(TransportError):
    """HTTP 400 -- the request was malformed."""


class AuthenticationException(TransportError):
    """HTTP 401 -- missing or invalid credentials."""


class AuthorizationException(TransportError):
    """HTTP 403 -- credentials lack the required permission."""
# more generic mappings from status_code to python exceptions
# (presumably used by the transport layer to raise the most specific
# subclass for a response status — confirm at the call site; unlisted
# codes would fall back to plain TransportError)
HTTP_EXCEPTIONS = {
    400: RequestError,
    401: AuthenticationException,
    403: AuthorizationException,
    404: NotFoundError,
    409: ConflictError,
}
| {
"repo_name": "brunobell/elasticsearch-py",
"path": "elasticsearch/exceptions.py",
"copies": "2",
"size": "3180",
"license": "apache-2.0",
"hash": 1805538558205631200,
"line_mean": 28.1743119266,
"line_max": 90,
"alpha_frac": 0.6528301887,
"autogenerated": false,
"ratio": 4.472573839662447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011164130331576157,
"num_lines": 109
} |
# Public API of this module: the infrastructure endpoints mixin.
__all__ = [
    'InfrastructureApi',
]
class InfrastructureApi(object):
    """Datadog infrastructure endpoints: search and host tagging.

    Designed as a mixin — the concrete client class supplies the
    ``http_request`` transport method this class calls.
    """

    @staticmethod
    def _source_params(source):
        # Optional "source" query parameter shared by the tag endpoints.
        params = {}
        if source:
            params['source'] = source
        return params

    def search(self, query):
        """Search Datadog for hosts and metrics by name.

        The *query* may be faceted to limit the result types (e.g.
        ``"host:foo"`` or ``"metric:bar"``) or un-faceted (``"baz"``),
        which matches all types.  Returns a dict mapping each queried
        facet to a list of name strings:

        >>> dog_http_api.search("cassandra")
        { "results": {
            "hosts": ["cassandraHostA", "cassandraHostB", ...],
            "metrics": ["cassandra.load", "cassandra.requests", ...]
          }
        }
        """
        return self.http_request(
            'GET', '/search', q=query,
            response_formatter=lambda x: x['results'],
        )

    def all_tags(self, source=None):
        """List the org's tags together with their member hosts.

        >>> dog_http_api.all_tags()
        [ { 'tag1': [ 'host1', 'host2', ... ] }, ... ]
        """
        return self.http_request(
            'GET', '/tags/hosts',
            response_formatter=lambda x: x['tags'],
            **self._source_params(source)
        )

    def host_tags(self, host_id, source=None, by_source=False):
        """List the tags of one host, addressed by name or numeric id.

        >>> dog_http_api.host_tags('web.example.com')
        ['web', 'env:production']
        >>> dog_http_api.host_tags(1234)
        ['database', 'env:test']
        """
        params = self._source_params(source)
        if by_source:
            params['by_source'] = 'true'
        return self.http_request(
            'GET', '/tags/hosts/' + str(host_id),
            response_formatter=lambda x: x['tags'],
            **params
        )

    def add_tags(self, host_id, tags, source=None):
        """add_tags(host_id, [tag1, tag2, ...])

        Attach one or more tags to a host (a bare string is treated as a
        single tag).

        >>> dog_http_api.add_tags(host_id, ['env:test', 'database'])
        """
        if isinstance(tags, basestring):
            tags = [tags]
        return self.http_request(
            'POST', '/tags/hosts/' + str(host_id), {'tags': tags},
            response_formatter=lambda x: x['tags'],
            **self._source_params(source)
        )

    def change_tags(self, host_id, tags, source=None):
        """change_tags(host_id, [tag1, tag2, ...])

        Replace all of a host's tags with the given one(s).

        >>> dog_http_api.change_tags(host_id, ['env:test', 'database'])
        """
        if isinstance(tags, basestring):
            tags = [tags]
        return self.http_request(
            'PUT', '/tags/hosts/' + str(host_id), {'tags': tags},
            response_formatter=lambda x: x['tags'],
            **self._source_params(source)
        )

    def detach_tags(self, host_id, source=None):
        """Remove every tag from a host.

        >>> dog_http_api.detach_tags(123)
        """
        return self.http_request(
            'DELETE', '/tags/hosts/' + str(host_id),
            **self._source_params(source)
        )
| {
"repo_name": "edx/dogapi",
"path": "src/dogapi/http/infrastructure.py",
"copies": "3",
"size": "3488",
"license": "bsd-3-clause",
"hash": -5995819106621650000,
"line_mean": 30.7090909091,
"line_max": 79,
"alpha_frac": 0.501146789,
"autogenerated": false,
"ratio": 3.795429815016322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5796576604016321,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'init',
'make_ops_dirs',
]
import dataclasses
import logging
import tempfile
from pathlib import Path
import g1.files
from g1 import scripts
from g1.bases.assertions import ASSERT
from g1.containers import models as ctr_models
from g1.containers import scripts as ctr_scripts
from g1.texts import jsons
from . import bases
from . import envs as ops_envs
from . import models
from . import repos
from . import systemds
from . import tokens
# Module-level logger, following the package's LOG convention.
LOG = logging.getLogger(__name__)
class PodBundleDir(repos.AbstractBundleDir):
    """Bundle directory of a pod deployment: a deploy instruction plus
    per-image and per-volume payload files."""

    deploy_instruction_type = models.PodDeployInstruction

    def post_init(self):
        """Validate that every expected bundle file is present."""
        ASSERT.predicate(self.path, Path.is_dir)
        ASSERT.predicate(self.deploy_instruction_path, Path.is_file)
        ASSERT.all((path for _, path in self.iter_images()), Path.is_file)
        ASSERT.all((path for _, path in self.iter_volumes()), Path.is_file)

    def iter_images(self):
        """Yield (image, image_file_path) for each declared image."""
        base = self.path / models.POD_BUNDLE_IMAGES_DIR_NAME
        for image in self.deploy_instruction.images:
            yield image, base / image.name / models.POD_BUNDLE_IMAGE_FILENAME

    def iter_volumes(self):
        """Yield (volume, volume_file_path) for each declared volume."""
        base = self.path / models.POD_BUNDLE_VOLUMES_DIR_NAME
        for volume in self.deploy_instruction.volumes:
            yield volume, base / volume.name / models.POD_BUNDLE_VOLUME_FILENAME
class PodOpsDir(repos.AbstractOpsDir):
    """Ops directory of an installed pod bundle.

    install/uninstall are deliberately ordered so that a partially
    failed install can be rolled back by uninstall: metadata is written
    first on install and removed last on uninstall.
    """

    metadata_type = models.PodMetadata
    # XXX: This annotation works around pylint no-member false errors.
    metadata: object

    @staticmethod
    def _get_pod_id_set(metadata):
        # All pod ids referenced by the metadata's systemd unit configs.
        return {config.pod_id for config in metadata.systemd_unit_configs}

    def check_invariants(self, active_ops_dirs):
        """Assert our pod ids do not collide with any active ops dir."""
        # We check uniqueness of UUIDs here, but to be honest, UUID is
        # quite unlikely to conflict.
        for ops_dir in active_ops_dirs:
            ASSERT.isdisjoint(
                self._get_pod_id_set(ops_dir.metadata),
                self._get_pod_id_set(self.metadata),
            )

    def install(self, bundle_dir, target_ops_dir_path):
        """Install *bundle_dir*: metadata, volumes, images, tokens,
        prepared pods, then systemd units — in that order."""
        ASSERT.isinstance(bundle_dir, PodBundleDir)
        log_args = (bundle_dir.label, bundle_dir.version)
        # Make metadata first so that uninstall may roll back properly.
        LOG.debug('pods install: metadata: %s %s', *log_args)
        metadata, groups = self._make_metadata(bundle_dir.deploy_instruction)
        jsons.dump_dataobject(metadata, self.metadata_path)
        bases.set_file_attrs(self.metadata_path)
        # Sanity check of the just-written metadata file.
        ASSERT.equal(self.label, bundle_dir.label)
        ASSERT.equal(self.version, bundle_dir.version)
        ASSERT.equal(self.metadata, metadata)
        LOG.debug(
            'pods install: pod ids: %s %s: %s', *log_args, ', '.join(groups)
        )
        LOG.debug('pods install: volumes: %s %s', *log_args)
        bases.make_dir(self.volumes_dir_path)
        for volume, volume_path in bundle_dir.iter_volumes():
            volume_dir_path = self.volumes_dir_path / volume.name
            LOG.debug('pods: extract: %s -> %s', volume_path, volume_dir_path)
            bases.make_dir(ASSERT.not_predicate(volume_dir_path, Path.exists))
            scripts.tar_extract(
                volume_path,
                directory=volume_dir_path,
                extra_args=(
                    # Volume archives carry ownership and permissions.
                    '--same-owner',
                    '--same-permissions',
                ),
            )
        LOG.debug('pods install: images: %s %s', *log_args)
        for _, image_path in bundle_dir.iter_images():
            ctr_scripts.ctr_import_image(image_path)
        LOG.debug('pods install: tokens: %s %s', *log_args)
        # Assign tokens (per pod id, per alias) from the tokens database.
        assignments = {}
        with tokens.make_tokens_database().writing() as active_tokens:
            for pod_id in groups:
                assignments[pod_id] = {
                    alias: active_tokens.assign(token_name, pod_id, alias)
                    for alias, token_name in
                    bundle_dir.deploy_instruction.token_names.items()
                }
        envs = ops_envs.load()
        LOG.debug('pods install: prepare pods: %s %s', *log_args)
        bases.make_dir(self.refs_dir_path)
        for pod_id, group in groups.items():
            pod_config = self._make_pod_config(
                bundle_dir.deploy_instruction,
                target_ops_dir_path,
                systemds.make_envs(
                    pod_id,
                    self.metadata,
                    group.envs,
                    envs,
                    assignments[pod_id],
                ),
            )
            # The config file only needs to exist while preparing the pod.
            with tempfile.NamedTemporaryFile() as config_tempfile:
                config_path = Path(config_tempfile.name)
                jsons.dump_dataobject(pod_config, config_path)
                ctr_scripts.ctr_prepare_pod(pod_id, config_path)
            ctr_scripts.ctr_add_ref_to_pod(pod_id, self.refs_dir_path / pod_id)
        LOG.debug('pods install: systemd units: %s %s', *log_args)
        units = {(pod_id, unit.name): unit
                 for pod_id, group in groups.items() for unit in group.units}
        for config in self.metadata.systemd_unit_configs:
            systemds.install(
                config,
                self.metadata,
                groups[config.pod_id],
                units[config.pod_id, config.name],
                envs,
                assignments[config.pod_id],
            )
        systemds.daemon_reload()
        return True

    @staticmethod
    def _make_metadata(deploy_instruction):
        """Generate a pod id per systemd unit group and build metadata;
        returns (metadata, {pod_id: group})."""
        groups = {}
        systemd_unit_configs = []
        for group in deploy_instruction.systemd_unit_groups:
            pod_id = ctr_models.generate_pod_id()
            groups[pod_id] = group
            systemd_unit_configs.extend(
                models.PodMetadata.SystemdUnitConfig(
                    pod_id=pod_id,
                    name=unit.name,
                    auto_start=unit.auto_start,
                    auto_stop=unit.auto_stop,
                ) for unit in group.units
            )
        metadata = models.PodMetadata(
            label=deploy_instruction.label,
            version=deploy_instruction.version,
            images=deploy_instruction.images,
            systemd_unit_configs=systemd_unit_configs,
        )
        return metadata, groups

    @staticmethod
    def _make_pod_config(deploy_instruction, target_ops_dir_path, envs):
        """Instantiate the pod config template: expand env placeholders
        in app commands and append mounts for the bundle's volumes."""

        def volume_to_mount(volume):
            # Volumes are mounted from the target ops dir's volumes dir.
            return ctr_models.PodConfig.Mount(
                source=str(
                    target_ops_dir_path / \
                    models.OPS_DIR_VOLUMES_DIR_NAME /
                    volume.name
                ),
                target=volume.target,
                read_only=volume.read_only,
            )

        return dataclasses.replace(
            deploy_instruction.pod_config_template,
            apps=[
                dataclasses.replace(
                    app,
                    exec=[arg.format_map(envs) for arg in app.exec],
                    service_section=(
                        app.service_section if app.service_section is None else
                        app.service_section.format_map(envs)
                    ),
                ) for app in deploy_instruction.pod_config_template.apps
            ],
            mounts=[
                *deploy_instruction.pod_config_template.mounts,
                *map(volume_to_mount, deploy_instruction.volumes),
            ],
        )

    def start(self, *, unit_names=None, all_units=False):
        """Enable and start the requested units."""
        ASSERT.not_all((unit_names is not None, all_units))
        LOG.info('pods start: %s %s', self.label, self.version)
        if unit_names is not None:
            predicate = lambda config: config.name in unit_names
        elif all_units:
            predicate = None
        else:
            # Default: only units flagged for automatic start.
            predicate = lambda config: config.auto_start
        for config in self._filter_pod_ids_and_units(predicate):
            systemds.activate(config)

    def restart(self, *, unit_names=None, all_units=False):
        """Restart the requested units.

        NOTE: `restart` is not equivalent to `stop` followed by `start`
        for two reasons:

        * `start` and `stop` enables and disables the requested units,
          but `restart` does not.
        * The default behavior (when both `unit_names` and `all_units`
          are not set) of `restart` only restarts units of which both
          `auto_start` and `auto_stop` are true, which is usually what
          you want.  But the default behavior of `stop` followed by
          `start` stops or starts only units of which `auto_start` or
          `auto_stop` is true.  So units of which `auto_start` is false
          will be stopped but not restarted, and units of which
          `auto_stop` is false will not be stopped and thus not
          restarted; this is generally not what you want.
        """
        ASSERT.not_all((unit_names is not None, all_units))
        LOG.info('pods restart: %s %s', self.label, self.version)
        if unit_names is not None:
            predicate = lambda config: config.name in unit_names
        elif all_units:
            predicate = None
        else:
            predicate = lambda config: config.auto_start and config.auto_stop
        for config in self._filter_pod_ids_and_units(predicate):
            systemds.restart(config)

    def stop(self, *, unit_names=None, all_units=False):
        """Disable and stop the requested units."""
        LOG.info('pods stop: %s %s', self.label, self.version)
        if unit_names is not None:
            predicate = lambda config: config.name in unit_names
        elif all_units:
            predicate = None
        else:
            predicate = lambda config: config.auto_stop
        for config in self._filter_pod_ids_and_units(predicate):
            systemds.deactivate(config)

    def stop_all(self):
        """Stop every unit of this pod."""
        self.stop(all_units=True)

    def _filter_pod_ids_and_units(self, predicate):
        # predicate=None means "all unit configs".
        return filter(predicate, self.metadata.systemd_unit_configs)

    def uninstall(self):
        """Tear down everything install created, in reverse order;
        metadata is removed last."""
        if not self.metadata_path.exists():
            LOG.info('skip: pods uninstall: metadata was removed')
            ASSERT.predicate(self.path, g1.files.is_empty_dir)
            return False
        log_args = (self.label, self.version)
        LOG.debug('pods uninstall: systemd units: %s %s', *log_args)
        for config in self.metadata.systemd_unit_configs:
            systemds.uninstall(config)
        systemds.daemon_reload()
        LOG.debug('pods uninstall: pods: %s %s', *log_args)
        g1.files.remove(self.refs_dir_path)
        for pod_id in self._get_pod_id_set(self.metadata):
            ctr_scripts.ctr_remove_pod(pod_id)
        LOG.debug('pods uninstall: tokens: %s %s', *log_args)
        with tokens.make_tokens_database().writing() as active_tokens:
            for config in self.metadata.systemd_unit_configs:
                active_tokens.unassign_all(config.pod_id)
        LOG.debug('pods uninstall: images: %s %s', *log_args)
        for image in self.metadata.images:
            ctr_scripts.ctr_remove_image(image, skip_active=True)
        LOG.debug('pods uninstall: volumes: %s %s', *log_args)
        g1.files.remove(self.volumes_dir_path)
        LOG.debug('pods uninstall: metadata: %s %s', *log_args)
        g1.files.remove(self.metadata_path)  # Remove metadata last.
        ASSERT.predicate(self.path, g1.files.is_empty_dir)
        return True
def init():
    """Initialize the pods ops-dirs repository layout."""
    ops_dirs_path = _get_ops_dirs_path()
    repos.OpsDirs.init(ops_dirs_path)
def make_ops_dirs():
    """Create the OpsDirs manager for pod bundles."""
    dir_name = models.REPO_PODS_DIR_NAME
    return repos.OpsDirs(
        dir_name,
        _get_ops_dirs_path(),
        bundle_dir_type=PodBundleDir,
        ops_dir_type=PodOpsDir,
    )
def _get_ops_dirs_path():
    # Pods ops-dirs live under <repo root>/<pods dir name>.
    return bases.get_repo_path() / models.REPO_PODS_DIR_NAME
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/pod_ops_dirs.py",
"copies": "1",
"size": "12030",
"license": "mit",
"hash": 2669948795422736400,
"line_mean": 36.7115987461,
"line_max": 79,
"alpha_frac": 0.5804655029,
"autogenerated": false,
"ratio": 3.9007782101167314,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99783633164513,
"avg_score": 0.0005760793130864077,
"num_lines": 319
} |
__all__ = [
'init',
'make_ops_dirs',
]
import logging
from pathlib import Path
import g1.files
from g1.bases.assertions import ASSERT
from g1.containers import scripts as ctr_scripts
from g1.texts import jsons
from . import bases
from . import models
from . import repos
# Module-level logger, following the package's LOG convention.
LOG = logging.getLogger(__name__)
class XarBundleDir(repos.AbstractBundleDir):
    """Bundle directory of a xar deployment: a deploy instruction plus
    either a zipapp file or a container image."""

    deploy_instruction_type = models.XarDeployInstruction

    def post_init(self):
        """Validate the bundle layout on construction."""
        ASSERT.predicate(self.path, Path.is_dir)
        ASSERT.predicate(self.deploy_instruction_path, Path.is_file)
        # Exactly one payload kind exists, depending on the bundle type.
        if self.deploy_instruction.is_zipapp():
            ASSERT.predicate(self.zipapp_path, Path.is_file)
        else:
            ASSERT.predicate(self.image_path, Path.is_file)

    @property
    def zipapp_path(self):
        """Path of the zipapp payload (zipapp bundles only)."""
        ASSERT.true(self.deploy_instruction.is_zipapp())
        return self.path / models.XAR_BUNDLE_ZIPAPP_FILENAME

    @property
    def image_path(self):
        """Path of the image payload (image bundles only)."""
        ASSERT.false(self.deploy_instruction.is_zipapp())
        return self.path / models.XAR_BUNDLE_IMAGE_FILENAME
class XarOpsDir(repos.AbstractOpsDir):
    """Ops directory of an installed xar (a zipapp executable or a
    container image + xar).

    Metadata is written first on install and removed last on uninstall
    so a failed install can be rolled back.
    """

    metadata_type = models.XarMetadata
    # XXX: This annotation works around pylint no-member false errors.
    metadata: object

    @property
    def zipapp_target_path(self):
        # Install location of the zipapp executable (zipapp xars only).
        ASSERT.true(self.metadata.is_zipapp())
        return bases.get_zipapp_target_path(self.metadata.name)

    def check_invariants(self, active_ops_dirs):
        """Assert that no other active xar shares this xar's name."""
        for ops_dir in active_ops_dirs:
            ASSERT(
                ops_dir.metadata.name != self.metadata.name,
                'expect unique xar label name: {}, {}',
                ops_dir.label,
                self.label,
            )

    def install(self, bundle_dir, target_ops_dir_path):
        """Install *bundle_dir*: metadata first, then the zipapp
        executable or the container image + xar."""
        del target_ops_dir_path  # Unused.
        ASSERT.isinstance(bundle_dir, XarBundleDir)
        log_args = (bundle_dir.label, bundle_dir.version)
        # Make metadata first so that uninstall may roll back properly.
        LOG.info('xars install: metadata: %s %s', *log_args)
        jsons.dump_dataobject(
            models.XarMetadata(
                label=bundle_dir.label,
                version=bundle_dir.version,
                image=bundle_dir.deploy_instruction.image,
            ),
            self.metadata_path,
        )
        bases.set_file_attrs(self.metadata_path)
        # Sanity check of the just-written metadata file.
        ASSERT.equal(self.label, bundle_dir.label)
        ASSERT.equal(self.version, bundle_dir.version)
        if bundle_dir.deploy_instruction.is_zipapp():
            LOG.info('xars install: zipapp: %s %s', *log_args)
            bases.copy_exec(bundle_dir.zipapp_path, self.zipapp_target_path)
        else:
            LOG.info('xars install: xar: %s %s', *log_args)
            ctr_scripts.ctr_import_image(bundle_dir.image_path)
            ctr_scripts.ctr_install_xar(
                bundle_dir.deploy_instruction.name,
                bundle_dir.deploy_instruction.exec_relpath,
                bundle_dir.deploy_instruction.image,
            )
        return True

    def start(self):
        # Xars are executables, not services: nothing to start.
        pass  # Nothing here.

    def stop(self):
        pass  # Nothing here.

    def stop_all(self):
        pass  # Nothing here.

    def uninstall(self):
        """Remove the installed artifacts; metadata is removed last."""
        if not self.metadata_path.exists():
            LOG.info('skip: xars uninstall: metadata was removed')
            ASSERT.predicate(self.path, g1.files.is_empty_dir)
            return False
        log_args = (self.label, self.version)
        if self.metadata.is_zipapp():
            LOG.info('xars uninstall: zipapp: %s %s', *log_args)
            g1.files.remove(self.zipapp_target_path)
        else:
            LOG.info('xars uninstall: xar: %s %s', *log_args)
            ctr_scripts.ctr_uninstall_xar(self.metadata.name)
            ctr_scripts.ctr_remove_image(self.metadata.image)
        g1.files.remove(self.metadata_path)  # Remove metadata last.
        ASSERT.predicate(self.path, g1.files.is_empty_dir)
        return True
def init():
    """Initialize the xars ops-dirs repository layout."""
    ops_dirs_path = _get_ops_dirs_path()
    repos.OpsDirs.init(ops_dirs_path)
def make_ops_dirs():
    """Create the OpsDirs manager for xar bundles."""
    dir_name = models.REPO_XARS_DIR_NAME
    return repos.OpsDirs(
        dir_name,
        _get_ops_dirs_path(),
        bundle_dir_type=XarBundleDir,
        ops_dir_type=XarOpsDir,
    )
def _get_ops_dirs_path():
    # Xars ops-dirs live under <repo root>/<xars dir name>.
    return bases.get_repo_path() / models.REPO_XARS_DIR_NAME
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/xar_ops_dirs.py",
"copies": "1",
"size": "4367",
"license": "mit",
"hash": 2612508407486844000,
"line_mean": 30.4172661871,
"line_max": 76,
"alpha_frac": 0.6185024044,
"autogenerated": false,
"ratio": 3.485235434956105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46037378393561057,
"avg_score": null,
"num_lines": null
} |
__all__ = ["NinjaTurtleAPI", "RenderTurtleAPI", "FullNinjaTurtleAPI"]

# Animation methods
# -----------------
# These methods are handled by the NinjaTurtle class and engine, and are to do
# with movement and position.
NinjaTurtleAPI = [
    'forward', 'fd',
    'backward', 'bk', 'back',
    'right', 'rt',
    'left', 'lt',
    'position', 'pos',
    'setheading', 'seth',
    'speed',
    'xcor',
    'ycor',
    'heading',
    #TODO
    'goto', 'setpos', 'setposition',
    'setx',
    'sety',
    'settiltangle',
    'tiltangle',
    'tilt',
    'towards',
    'distance',
    'home',
    'reset',
    'circle',
    'clone',
    # not doing these currently
    #'resizemode',
    #'undo',
    #'setundobuffer',
    #'undobufferentries',
    #'degrees',
    #'radians',
    #'getturtle', 'getpen',
    #'getscreen',
]
# Renderer Turtle methods
#-----------------------
#
#These methods depend on the renderer for implementation
RenderTurtleAPI = [
    'shape',
    'shapesize', 'turtlesize',
    'pencolor',
    #TODO
    'color',
    'fillcolor',
    'showturtle', 'st',
    'hideturtle', 'ht',
    'isvisible',
    'pendown', 'pd', 'down',
    'penup', 'pu', 'up',
    'pensize', 'width',
    'pen',
    'isdown',
    'begin_fill', 'end_fill',
    'dot',
    'stamp',
    'clear',
    'clearstamp', 'clearstamps',
    'write',
    # currently unsupported
    #'begin_poly',
    #'end_poly',
    #'get_poly',
]
FullNinjaTurtleAPI = NinjaTurtleAPI + RenderTurtleAPI

# Sanity check at import time: the two API halves must not share names.
# Use an explicit raise instead of `assert` so the check still runs
# under `python -O` (which strips assert statements).
_overlap = set(NinjaTurtleAPI) & set(RenderTurtleAPI)
if _overlap:
    raise AssertionError(
        "NinjaTurtleAPI and RenderTurtleAPI are not disjoint: %s"
        % sorted(_overlap))
| {
"repo_name": "AllTheWayDown/ninjaturtle",
"path": "ninjaturtle/turtleapi.py",
"copies": "1",
"size": "1610",
"license": "mit",
"hash": 4411687448325537000,
"line_mean": 18.1666666667,
"line_max": 78,
"alpha_frac": 0.5559006211,
"autogenerated": false,
"ratio": 3.23943661971831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.429533724081831,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'InprocServer',
]
import functools
from g1.bases import collections as g1_collections
from . import utils
class InprocServer:
    """Expose an asynchronous application in the same process.

    This bypasses even the inproc transport: no serialization and no
    memory-copying of messages at all — calls go straight to the
    application's methods.
    """

    def __init__(
        self,
        application,
        request_type,
        response_type,
        *,
        internal_server_error_type=None,
    ):
        self._application = application
        self._declared_error_types = frozenset(
            utils.get_declared_error_types(response_type)
        )
        self._internal_server_error_type = internal_server_error_type
        # One transceiver wrapper per method declared by the request type.
        methods = {
            name: self._make_transceiver(getattr(self._application, name))
            for name in request_type.m
        }
        self.m = g1_collections.Namespace(**methods)

    def _make_transceiver(self, method):
        @functools.wraps(method)
        async def transceive(**kwargs):
            return await self._transceive(method, kwargs)
        return transceive

    async def _transceive(self, method, kwargs):
        try:
            return await method(**kwargs)
        except Exception as exc:
            wrapper_type = self._internal_server_error_type
            if wrapper_type is None:
                raise
            # Declared errors propagate as-is; exact type match is
            # intentional (no subclass leniency).
            if type(exc) in self._declared_error_types:  # pylint: disable=unidiomatic-typecheck
                raise
            raise wrapper_type from exc
| {
"repo_name": "clchiou/garage",
"path": "py/g1/messaging/g1/messaging/reqrep/inprocs.py",
"copies": "1",
"size": "1519",
"license": "mit",
"hash": -2782363643091988000,
"line_mean": 26.125,
"line_max": 99,
"alpha_frac": 0.5865701119,
"autogenerated": false,
"ratio": 4.402898550724638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00017857142857142857,
"num_lines": 56
} |
# Public API: the entry point plus every encoder class, so users can
# extend or replace individual type encoders.
__all__ = [
    'json_schema',
    'Encoder',
    'TypeEncoder',
    'ModifierEncoder',
    'AnyEncoder',
    'StringEncoder',
    'NumberEncoder',
    'BooleanEncoder',
    'DateTimeEncoder',
    'DateEncoder',
    'TimeEncoder',
    'ListEncoder',
    'TupleEncoder',
    'ObjectEncoder',
    'DictEncoder',
    'OneOfEncoder',
    'ConstantEncoder',
]
import lollipop.type_registry as lr
import lollipop.types as lt
import lollipop.validators as lv
from lollipop.utils import identity, is_mapping
from collections import OrderedDict, namedtuple
from .compat import itervalues, iteritems
import re
def find_validators(schema, validator_type):
    """Return *schema*'s validators that are instances of
    *validator_type*, in their original order."""
    return [v for v in schema.validators if isinstance(v, validator_type)]
class Definition(object):
    """Named placeholder for a JSON-Schema definition; ``jsonschema``
    starts out empty and is filled in once the type is encoded."""

    def __init__(self, name):
        self.name = name
        self.jsonschema = None
def _sanitize_name(name):
valid_chars_name = re.sub('[^a-zA-Z0-9-_]+', ' ', name).strip()
camel_cased_name = re.sub('[_ ]+([a-z])', lambda m: m.group(1).upper(),
valid_chars_name)
return camel_cased_name
def has_modifier(schema, modifier):
    """Return True if *modifier* appears anywhere in *schema*'s chain of
    Modifier/TypeRef wrappers."""
    current = schema
    while isinstance(current, (lt.Modifier, lr.TypeRef)):
        if isinstance(current, modifier):
            return True
        current = current.inner_type
    return False
def is_optional(schema):
    """Return True if *schema* is wrapped in ``lt.Optional``."""
    return has_modifier(schema, lt.Optional)
class TypeEncoder(object):
    """Base encoder: matches a lollipop schema type and emits the JSON
    Schema keywords common to all types (title, description, enum)."""

    # Subclasses narrow this to the lollipop type they handle.
    schema_type = object

    def match(self, schema):
        """Return True if this encoder handles *schema*."""
        return isinstance(schema, self.schema_type)

    def json_schema(self, encoder, schema):
        """Encode the keywords shared by every schema type."""
        js = OrderedDict()
        if schema.name:
            js['title'] = schema.name
        if schema.description:
            js['description'] = schema.description

        any_of_validators = find_validators(schema, lv.AnyOf)
        if any_of_validators:
            # The allowed values are the intersection of all AnyOf sets.
            choices = set(any_of_validators[0].choices)
            for validator in any_of_validators[1:]:
                choices = choices.intersection(set(validator.choices))
            if not choices:
                raise ValueError('AnyOf constraints choices does not allow any values')
            js['enum'] = [schema.dump(choice) for choice in choices]

        none_of_validators = find_validators(schema, lv.NoneOf)
        if none_of_validators:
            # The forbidden values are the union of all NoneOf sets.
            banned = set(none_of_validators[0].values)
            for validator in none_of_validators[1:]:
                banned = banned.union(set(validator.values))
            if banned:
                js['not'] = {'enum': [schema.dump(choice) for choice in banned]}
        return js
def is_dump_schema(schema):
    """True unless *schema* is marked LoadOnly (it takes part in dumping)."""
    return not has_modifier(schema, lt.LoadOnly)


def is_load_schema(schema):
    """True unless *schema* is marked DumpOnly (it takes part in loading)."""
    return not has_modifier(schema, lt.DumpOnly)
class ModifierEncoder(TypeEncoder):
    """Encodes modifier wrappers (Optional, LoadOnly, DumpOnly) by
    delegating to the wrapped type and adjusting the result."""

    schema_type = lt.Modifier

    def json_schema(self, encoder, schema):
        # Encode the wrapped type first; None means "excluded in this mode".
        js = encoder.json_schema(schema.inner_type)
        if js is None:
            return None
        if isinstance(schema, lt.Optional):
            # Surface the load-time default value, if one is set.
            default = schema.load_default()
            if default is None:
                js['default'] = None
            elif default is not lt.MISSING:
                js['default'] = schema.inner_type.dump(default)
        elif encoder.mode and (
            # NOTE: this branch is only reached for non-Optional
            # modifiers — LoadOnly/DumpOnly are dropped in the opposite
            # mode.
            (encoder.mode == 'dump' and not is_dump_schema(schema)) or
            (encoder.mode == 'load' and not is_load_schema(schema))
        ):
            return None
        return js
class AnyEncoder(TypeEncoder):
    # lt.Any maps to an unconstrained schema: only the base keywords
    # (title/description/enum) inherited from TypeEncoder apply.
    schema_type = lt.Any
class StringEncoder(TypeEncoder):
    """Encodes lt.String: type 'string' plus length/pattern constraints."""

    schema_type = lt.String

    def json_schema(self, encoder, schema):
        js = super(StringEncoder, self).json_schema(encoder, schema)
        js['type'] = 'string'

        length_validators = find_validators(schema, lv.Length)
        if length_validators:
            # Exact lengths constrain both bounds; the tightest bound
            # across all validators wins.
            exact = [v.exact for v in length_validators if v.exact is not None]
            lower = [v.min for v in length_validators if v.min is not None] + exact
            if lower:
                js['minLength'] = max(lower)
            upper = [v.max for v in length_validators if v.max is not None] + exact
            if upper:
                js['maxLength'] = min(upper)

        regexp_validators = find_validators(schema, lv.Regexp)
        if regexp_validators:
            # Only the first Regexp validator can be expressed in JSON Schema.
            js['pattern'] = regexp_validators[0].regexp.pattern
        return js
class NumberEncoder(TypeEncoder):
    """Encodes lt.Number (and lt.Integer) with optional Range bounds."""

    schema_type = lt.Number

    def json_schema(self, encoder, schema):
        js = super(NumberEncoder, self).json_schema(encoder, schema)
        js['type'] = 'integer' if isinstance(schema, lt.Integer) else 'number'

        range_validators = find_validators(schema, lv.Range)
        if range_validators:
            # The tightest bound across all Range validators wins.
            lower = [v.min for v in range_validators if v.min is not None]
            if lower:
                js['minimum'] = max(lower)
            upper = [v.max for v in range_validators if v.max is not None]
            if upper:
                js['maximum'] = min(upper)
        return js
class BooleanEncoder(TypeEncoder):
    """Encodes lt.Boolean schemas as JSON Schema 'boolean' types."""
    schema_type = lt.Boolean
    def json_schema(self, encoder, schema):
        result = super(BooleanEncoder, self).json_schema(encoder, schema)
        result['type'] = 'boolean'
        return result
class DateTimeEncoder(TypeEncoder):
    """Encodes lt.DateTime as strings with JSON Schema format 'date-time'."""
    schema_type = lt.DateTime
    def json_schema(self, encoder, schema):
        result = super(DateTimeEncoder, self).json_schema(encoder, schema)
        result.update(type='string', format='date-time')
        return result
class DateEncoder(TypeEncoder):
    """Encodes lt.Date as strings with JSON Schema format 'date'."""
    schema_type = lt.Date
    def json_schema(self, encoder, schema):
        result = super(DateEncoder, self).json_schema(encoder, schema)
        result.update(type='string', format='date')
        return result
class TimeEncoder(TypeEncoder):
    """Encodes lt.Time as strings with JSON Schema format 'time'."""
    schema_type = lt.Time
    def json_schema(self, encoder, schema):
        result = super(TimeEncoder, self).json_schema(encoder, schema)
        result.update(type='string', format='time')
        return result
class ListEncoder(TypeEncoder):
    """Encodes lt.List schemas as JSON Schema 'array' types."""
    schema_type = lt.List
    def json_schema(self, encoder, schema):
        result = super(ListEncoder, self).json_schema(encoder, schema)
        result['type'] = 'array'
        item_schema = encoder.json_schema(schema.item_type)
        if item_schema is None:
            # The item type is excluded from this rendering (e.g. the
            # encoder's mode dropped it); only the empty array is valid.
            result['maxItems'] = 0
        else:
            result['items'] = item_schema
        length_validators = find_validators(schema, lv.Length)
        if length_validators:
            # Exact-length constraints act as both lower and upper bounds.
            exact = [v.exact for v in length_validators if v.exact is not None]
            lower_bounds = exact + [
                v.min for v in length_validators if v.min is not None
            ]
            upper_bounds = exact + [
                v.max for v in length_validators if v.max is not None
            ]
            if lower_bounds:
                result['minItems'] = max(lower_bounds)
            if upper_bounds:
                result['maxItems'] = min(upper_bounds)
        if any(v.key is identity
               for v in find_validators(schema, lv.Unique)):
            result['uniqueItems'] = True
        return result
class TupleEncoder(TypeEncoder):
    """Encodes lt.Tuple schemas as positional ('items' list) JSON arrays."""
    schema_type = lt.Tuple
    def json_schema(self, encoder, schema):
        result = super(TupleEncoder, self).json_schema(encoder, schema)
        result['type'] = 'array'
        rendered_items = []
        for item_type in schema.item_types:
            item_schema = encoder.json_schema(item_type)
            if item_schema is not None:
                rendered_items.append(item_schema)
        if rendered_items:
            result['items'] = rendered_items
        else:
            # All item types were excluded; only the empty array is valid.
            result['maxItems'] = 0
        return result
def is_type(schema, schema_type):
    """Check schema's type, looking through Modifier/TypeRef wrappers."""
    unwrapped = schema
    while isinstance(unwrapped, (lt.Modifier, lr.TypeRef)):
        unwrapped = unwrapped.inner_type
    return isinstance(unwrapped, schema_type)
class ObjectEncoder(TypeEncoder):
    """Encodes lt.Object schemas as JSON Schema 'object' types."""
    schema_type = lt.Object
    def json_schema(self, encoder, schema):
        js = super(ObjectEncoder, self).json_schema(encoder, schema)
        js['type'] = 'object'
        properties = OrderedDict(
            (field_name, field_schema)
            for field_name, field in iteritems(schema.fields)
            for field_schema in [encoder.json_schema(field.field_type)]
            if field_schema is not None
        )
        if properties:
            js['properties'] = properties
        # BUG FIX: membership must be tested against the local
        # ``properties`` dict -- ``js['properties']`` is only present
        # when ``properties`` is non-empty and would raise KeyError
        # otherwise (this also matches DictEncoder below).
        required = [
            field_name
            for field_name, field in iteritems(schema.fields)
            if not is_optional(field.field_type) and field_name in properties
        ]
        if required:
            js['required'] = required
        if schema.allow_extra_fields in [True, False]:
            js['additionalProperties'] = schema.allow_extra_fields
        elif isinstance(schema.allow_extra_fields, lt.Field):
            field_type = schema.allow_extra_fields.field_type
            field_schema = encoder.json_schema(field_type)
            if field_schema is not None:
                if is_type(field_type, lt.Any):
                    # Any-typed extras mean "no constraint" on extras.
                    js['additionalProperties'] = True
                else:
                    js['additionalProperties'] = field_schema
        if not js.get('properties') and not js.get('additionalProperties'):
            # Nothing can be present: only the empty object validates.
            js['maxProperties'] = 0
        return js
class DictEncoder(TypeEncoder):
    """Encodes lt.Dict schemas as JSON Schema 'object' types."""
    schema_type = lt.Dict
    def json_schema(self, encoder, schema):
        result = super(DictEncoder, self).json_schema(encoder, schema)
        result['type'] = 'object'
        properties = OrderedDict()
        for key, value_type in iteritems(schema.value_types):
            value_schema = encoder.json_schema(value_type)
            if value_schema is not None:
                properties[key] = value_schema
        if properties:
            result['properties'] = properties
        required = [
            key
            for key, value_type in iteritems(schema.value_types)
            if not is_optional(value_type) and key in properties
        ]
        if required:
            result['required'] = required
        if hasattr(schema.value_types, 'default'):
            additional_schema = encoder.json_schema(schema.value_types.default)
            if additional_schema is not None:
                result['additionalProperties'] = additional_schema
        if not result.get('properties') and not result.get('additionalProperties'):
            # Nothing can be present: only the empty object validates.
            result['maxProperties'] = 0
        return result
class OneOfEncoder(TypeEncoder):
    """Encodes lt.OneOf schemas as an 'anyOf' union of variant schemas."""
    schema_type = lt.OneOf
    def json_schema(self, encoder, schema):
        result = super(OneOfEncoder, self).json_schema(encoder, schema)
        if is_mapping(schema.types):
            variants = itervalues(schema.types)
        else:
            variants = schema.types
        rendered = []
        for variant in variants:
            variant_schema = encoder.json_schema(variant)
            if variant_schema is not None:
                rendered.append(variant_schema)
        if not rendered:
            # Every variant was excluded, so the union is unrepresentable.
            return None
        result['anyOf'] = rendered
        return result
class ConstantEncoder(TypeEncoder):
    """Encodes lt.Constant schemas via the JSON Schema 'const' keyword."""
    schema_type = lt.Constant
    def json_schema(self, encoder, schema):
        result = super(ConstantEncoder, self).json_schema(encoder, schema)
        result['const'] = schema.value
        return result
class SchemaUsageCounter(object):
    """Walks a schema graph and counts how many times each schema occurs.

    It exposes the same ``json_schema`` method the type encoders call on
    a real encoder, so they drive the traversal; a schema's children are
    only visited on its first occurrence.
    """
    def __init__(self, type_encoders):
        self._type_encoders = type_encoders
        self.counts = {}
    def json_schema(self, schema, force_render=False):
        if isinstance(schema, lr.TypeRef):
            schema = schema.inner_type
        seen = self.counts.get(schema)
        if seen is not None:
            self.counts[schema] = seen + 1
            return
        self.counts[schema] = 1
        # Recurse into child schemas via the first matching encoder.
        matching = next(
            (enc for enc in self._type_encoders if enc.match(schema)), None)
        if matching is not None:
            matching.json_schema(self, schema)
class JsonSchemaGenerator(object):
    """Renders schemas to JSON schema, emitting $ref for shared definitions."""
    def __init__(self, type_encoders, definitions=None, mode=None):
        self.type_encoders = type_encoders
        self.definitions = definitions
        self.mode = mode
    def json_schema(self, schema, force_render=False):
        if isinstance(schema, lr.TypeRef):
            schema = schema.inner_type
        if schema in self.definitions and not force_render:
            # Shared schema: point at its entry in #/definitions.
            return {'$ref': '#/definitions/' + self.definitions[schema].name}
        for type_encoder in self.type_encoders:
            if type_encoder.match(schema):
                return type_encoder.json_schema(self, schema)
        return None
class Encoder(object):
    """Top-level converter from Lollipop schemas to JSON schema documents."""
    def __init__(self):
        self._encoders = []
        # Later-registered encoders take precedence (add_encoder inserts
        # at the front of the match list).
        self.add_encoder(ModifierEncoder())
        self.add_encoder(AnyEncoder())
        self.add_encoder(StringEncoder())
        self.add_encoder(NumberEncoder())
        self.add_encoder(BooleanEncoder())
        self.add_encoder(DateTimeEncoder())
        self.add_encoder(DateEncoder())
        self.add_encoder(TimeEncoder())
        self.add_encoder(ListEncoder())
        self.add_encoder(TupleEncoder())
        self.add_encoder(ObjectEncoder())
        self.add_encoder(DictEncoder())
        self.add_encoder(OneOfEncoder())
        self.add_encoder(ConstantEncoder())
    def add_encoder(self, encoder):
        # Prepend so custom encoders override the built-in ones.
        self._encoders.insert(0, encoder)
    def json_schema(self, schema, definitions=None, mode=None):
        """Convert Lollipop schema to JSON schema."""
        is_top_level_schema = definitions is None
        if definitions is None:
            definitions = {}
        definition_names = {definition.name
                            for definition in itervalues(definitions)}
        # First pass: count schema occurrences so shared (count > 1)
        # schemas can be hoisted into #/definitions.
        counter = SchemaUsageCounter(self._encoders)
        counter.json_schema(schema)
        counts = counter.counts
        for schema1, count in iteritems(counts):
            if count == 1:
                continue
            if schema1 not in definitions:
                # Derive a unique definition name, appending a numeric
                # suffix on collisions (Type, Type1, Type2, ...).
                def_name = _sanitize_name(schema1.name) if schema1.name else 'Type'
                if def_name in definition_names:
                    i = 1
                    while def_name + str(i) in definition_names:
                        i += 1
                    def_name += str(i)
                definitions[schema1] = Definition(def_name)
                definition_names.add(def_name)
        # Second pass: render each definition body once (force_render
        # bypasses the $ref shortcut for the definition itself).
        generator = JsonSchemaGenerator(self._encoders, definitions=definitions, mode=mode)
        for schema1, definition in iteritems(definitions):
            if definition.jsonschema is not None:
                continue
            definitions[schema1].jsonschema = generator.json_schema(
                schema1, force_render=True,
            )
        js = generator.json_schema(schema)
        if is_top_level_schema and definitions:
            js['definitions'] = {definition.name: definition.jsonschema
                                 for definition in itervalues(definitions)}
        return js
# Module-level convenience: a shared Encoder instance whose bound
# ``json_schema`` method is the module's public entry point.
_DEFAULT_ENCODER = Encoder()
json_schema = _DEFAULT_ENCODER.json_schema
| {
"repo_name": "maximkulkin/lollipop-jsonschema",
"path": "lollipop_jsonschema/jsonschema.py",
"copies": "1",
"size": "15427",
"license": "mit",
"hash": 7456907588266180000,
"line_mean": 28.6103646833,
"line_max": 91,
"alpha_frac": 0.5793738251,
"autogenerated": false,
"ratio": 4.20698118352877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5286355008628769,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'json_schema',
]
import lollipop.types as lt
import lollipop.validators as lv
from lollipop.utils import identity
from collections import OrderedDict
from .compat import iteritems
def find_validators(schema, validator_type):
    """Return all of *schema*'s validators that are of *validator_type*."""
    matches = []
    for validator in schema.validators:
        if isinstance(validator, validator_type):
            matches.append(validator)
    return matches
def json_schema(schema):
    """Convert Lollipop schema to JSON schema.

    Walks the schema recursively, mapping each Lollipop type to the
    corresponding JSON Schema keywords; validator constraints (AnyOf,
    NoneOf, Length, Range, Regexp, Unique) become enum/length/range
    keywords.
    """
    js = OrderedDict()
    if schema.name:
        js['title'] = schema.name
    if schema.description:
        js['description'] = schema.description
    any_of_validators = find_validators(schema, lv.AnyOf)
    if any_of_validators:
        # Intersect the allowed choices of all AnyOf validators.
        choices = set(any_of_validators[0].choices)
        for validator in any_of_validators[1:]:
            choices = choices.intersection(set(validator.choices))
        if not choices:
            raise ValueError('AnyOf constraints choices does not allow any values')
        js['enum'] = list(schema.dump(choice) for choice in choices)
        # An enum fully determines the value set; nothing more to add.
        return js
    none_of_validators = find_validators(schema, lv.NoneOf)
    if none_of_validators:
        # Union the forbidden values of all NoneOf validators.
        choices = set(none_of_validators[0].values)
        for validator in none_of_validators[1:]:
            choices = choices.union(set(validator.values))
        if choices:
            js['not'] = {'enum': list(schema.dump(choice) for choice in choices)}
    if isinstance(schema, lt.Any):
        pass
    elif isinstance(schema, lt.String):
        js['type'] = 'string'
        length_validators = find_validators(schema, lv.Length)
        if length_validators:
            # NOTE(review): these truthiness tests treat 0/None alike and
            # ``v.exact or v.min`` can yield None when only max is set --
            # pre-existing behavior, left unchanged here.
            if any(v.min for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['minLength'] = max(v.exact or v.min for v in length_validators)
            if any(v.max for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['maxLength'] = min(v.exact or v.max for v in length_validators)
        regexp_validators = find_validators(schema, lv.Regexp)
        if regexp_validators:
            js['pattern'] = regexp_validators[0].regexp.pattern
    elif isinstance(schema, lt.Number):
        if isinstance(schema, lt.Integer):
            js['type'] = 'integer'
        else:
            js['type'] = 'number'
        range_validators = find_validators(schema, lv.Range)
        if range_validators:
            if any(v.min for v in range_validators):
                js['minimum'] = max(v.min for v in range_validators if v.min)
            if any(v.max for v in range_validators):
                js['maximum'] = min(v.max for v in range_validators if v.max)
    elif isinstance(schema, lt.Boolean):
        js['type'] = 'boolean'
    elif isinstance(schema, lt.List):
        js['type'] = 'array'
        js['items'] = json_schema(schema.item_type)
        length_validators = find_validators(schema, lv.Length)
        if length_validators:
            if any(v.min for v in length_validators) or \
                    any(v.exact for v in length_validators):
                # BUG FIX: lower bounds combine with max(), not min() --
                # the tightest minimum wins, mirroring minLength above.
                js['minItems'] = max(v.exact or v.min for v in length_validators)
            if any(v.max for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['maxItems'] = min(v.exact or v.max for v in length_validators)
        unique_validators = find_validators(schema, lv.Unique)
        if unique_validators and any(v.key is identity for v in unique_validators):
            js['uniqueItems'] = True
    elif isinstance(schema, lt.Tuple):
        js['type'] = 'array'
        js['items'] = [json_schema(item_type) for item_type in schema.item_types]
    elif isinstance(schema, lt.Object):
        js['type'] = 'object'
        js['properties'] = OrderedDict(
            (k, json_schema(v.field_type))
            for k, v in iteritems(schema.fields)
        )
        required = [
            k
            for k, v in iteritems(schema.fields)
            if not isinstance(v.field_type, lt.Optional)
        ]
        if required:
            js['required'] = required
        if schema.allow_extra_fields in [True, False]:
            js['additionalProperties'] = schema.allow_extra_fields
        elif isinstance(schema.allow_extra_fields, lt.Field):
            field_type = schema.allow_extra_fields.field_type
            if isinstance(field_type, lt.Any):
                js['additionalProperties'] = True
            else:
                js['additionalProperties'] = json_schema(field_type)
    elif isinstance(schema, lt.Dict):
        js['type'] = 'object'
        # Fixed per-key value types exist only when value_types is
        # mapping-like; otherwise all keys share a single value schema.
        fixed_properties = schema.value_types \
            if hasattr(schema.value_types, 'keys') else {}
        properties = OrderedDict(
            (k, json_schema(v))
            for k, v in iteritems(fixed_properties)
        )
        if properties:
            js['properties'] = properties
        required = [
            k
            for k, v in iteritems(fixed_properties)
            if not isinstance(v, lt.Optional)
        ]
        if required:
            js['required'] = required
        if hasattr(schema.value_types, 'default'):
            js['additionalProperties'] = json_schema(schema.value_types.default)
    elif isinstance(schema, lt.Constant):
        js['const'] = schema.value
    elif isinstance(schema, lt.Optional):
        js.update(json_schema(schema.inner_type))
        default = schema.load_default()
        if default:
            js['default'] = schema.inner_type.dump(default)
    elif hasattr(schema, 'inner_type'):
        # Any other wrapper type: render what it wraps.
        js.update(json_schema(schema.inner_type))
    return js
| {
"repo_name": "akscram/lollipop-jsonschema",
"path": "lollipop_jsonschema/jsonschema.py",
"copies": "1",
"size": "5661",
"license": "mit",
"hash": -3347910036887420400,
"line_mean": 36.74,
"line_max": 83,
"alpha_frac": 0.5945945946,
"autogenerated": false,
"ratio": 4.052254831782391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.514684942638239,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Kernel',
]
import collections
import contextlib
import functools
import inspect
import logging
import os
import sys
import threading
import time
import weakref
from g1.bases import classes
from g1.bases import timers
from g1.bases.assertions import ASSERT
from . import blockers
from . import errors
from . import pollers
from . import tasks
from . import traps
LOG = logging.getLogger(__name__)
# A ready-queue entry: the task to tick plus the trap result or trap
# exception to deliver to it when it resumes (at most one of the two is
# non-None; see Kernel._run_one_ready_task).
TaskReady = collections.namedtuple(
    'TaskReady',
    'task trap_result trap_exception',
)
# Snapshot of the kernel's internal counters, as returned by
# Kernel.get_stats.
KernelStats = collections.namedtuple(
    'KernelStats',
    [
        'num_ticks',
        'num_tasks',
        'num_ready',
        # Blocking trap stats.
        'num_join',
        'num_poll',
        'num_sleep',
        'num_blocked',
        # Disrupter stats.
        'num_to_raise',
        'num_timeout',
        # Async generators.
        'num_async_generators',
    ],
)
class Kernel:
    """Coroutine task scheduler: runs tasks and multiplexes their I/O.

    A kernel is owned by one thread (the creating thread by default);
    most methods assert they are called from that thread.  Tasks move
    between the ready queue and a set of blockers, one per kind of
    blocking trap (join, poll-read, poll-write, sleep, generic block,
    block-forever).
    """
    def __init__(self, *, owner=None, sanity_check_frequency=100):
        self._owner = owner or threading.get_ident()
        self._closed = False
        self._num_ticks = 0
        self._sanity_check_frequency = sanity_check_frequency
        # Tasks are juggled among these collections.
        self._num_tasks = 0
        self._current_task = None
        self._ready_tasks = collections.deque()
        self._task_completion_blocker = blockers.TaskCompletionBlocker()
        self._read_blocker = blockers.DictBlocker()
        self._write_blocker = blockers.DictBlocker()
        self._sleep_blocker = blockers.TimeoutBlocker()
        self._generic_blocker = blockers.DictBlocker()
        self._forever_blocker = blockers.ForeverBlocker()
        # WeakSet: non-finalized async generators; entries vanish when
        # generators are garbage-collected.
        self._async_generators = weakref.WeakSet()
        # Track tasks that are going to raise at the next trap point
        # due to ``cancel``, ``timeout_after``, etc.  I call them
        # **disrupter** because they "disrupt" blocking traps.
        self._to_raise = {}
        self._timeout_after_blocker = blockers.TimeoutBlocker()
        self._poller = pollers.Epoll()
        # Callbacks posted from other threads; drained once per tick.
        self._callbacks_lock = threading.Lock()
        self._callbacks = collections.deque()
        # The nudger's pipe wakes up a poll() blocked in ``run``.
        self._nudger = Nudger()
        self._nudger.register_to(self._poller)
        self._blocking_trap_handlers = {
            traps.Traps.BLOCK: self._block,
            traps.Traps.JOIN: self._join,
            traps.Traps.POLL: self._poll,
            traps.Traps.SLEEP: self._sleep,
        }
    def get_stats(self):
        """Return internal stats."""
        return KernelStats(
            num_ticks=self._num_ticks,
            num_tasks=self._num_tasks,
            num_ready=len(self._ready_tasks),
            num_join=len(self._task_completion_blocker),
            num_poll=len(self._read_blocker) + len(self._write_blocker),
            num_sleep=len(self._sleep_blocker),
            num_blocked=(
                len(self._generic_blocker) + len(self._forever_blocker)
            ),
            num_to_raise=len(self._to_raise),
            num_timeout=len(self._timeout_after_blocker),
            num_async_generators=len(self._async_generators),
        )
    __repr__ = classes.make_repr(
        '{stats!r}',
        stats=lambda self: self.get_stats(),
    )
    def close(self):
        """Close all async generators, abort all tasks, and shut down."""
        self._assert_owner()
        if self._closed:
            return
        if self._async_generators:
            LOG.warning(
                'close: num non-finalized async generators: %d',
                len(self._async_generators),
            )
            for async_generator in self._async_generators:
                self._close_async_generator(async_generator)
        for task in self.get_all_tasks():
            if not task.is_completed():
                task.abort()
        self._poller.close()
        self._nudger.close()
        self._closed = True
    def __enter__(self):
        return self
    def __exit__(self, *_):
        self.close()
    def _assert_owner(self):
        """Assert that the calling thread is the owner."""
        ASSERT.equal(threading.get_ident(), self._owner)
    def _is_owner(self):
        return threading.get_ident() == self._owner
    def _sanity_check(self):
        # Invariant: every live task is in exactly one collection (or is
        # the current task), so the counts must add up to _num_tasks.
        expect_num_tasks = self._num_tasks
        actual_num_tasks = sum(
            map(
                len,
                (
                    self._ready_tasks,
                    self._task_completion_blocker,
                    self._read_blocker,
                    self._write_blocker,
                    self._sleep_blocker,
                    self._generic_blocker,
                    self._forever_blocker,
                ),
            )
        )
        if self._current_task:
            actual_num_tasks += 1
        ASSERT(
            expect_num_tasks >= 0 and expect_num_tasks == actual_num_tasks,
            'sanity check fail: {!r}',
            self,
        )
    def run(self, awaitable=None, timeout=None):
        """Run spawned tasks through completion.

        If ``awaitable`` is not ``None``, a task is spawned for it, and
        when the task completes, ``run`` returns its result.

        If ``timeout`` is non-positive, ``run`` is guaranteed to iterate
        exactly once.
        """
        ASSERT.false(self._closed)
        self._assert_owner()
        ASSERT.none(self._current_task)  # Disallow recursive calls.
        main_task = self.spawn(awaitable) if awaitable else None
        run_timer = timers.make(timeout)
        while self._num_tasks > 0:
            # Do sanity check every ``_sanity_check_frequency`` ticks.
            if self._num_ticks % self._sanity_check_frequency == 0:
                self._sanity_check()
            self._num_ticks += 1
            # Fire callbacks posted by other threads.
            with self._callbacks_lock:
                callbacks, self._callbacks = \
                    self._callbacks, collections.deque()
            for callback in callbacks:
                callback()
            del callbacks
            # Run all ready tasks.
            with self._managing_async_generators():
                while self._ready_tasks:
                    completed_task = self._run_one_ready_task()
                    if completed_task and completed_task is main_task:
                        # Return the result eagerly.  If you want to run
                        # all remaining tasks through completion, just
                        # call ``run`` again with no arguments.
                        return completed_task.get_result_nonblocking()
            if self._num_tasks > 0:
                # Poll I/O.
                now = time.monotonic()
                # Wake up at the earliest of: run timeout, next sleeper,
                # next task timeout.
                poll_timeout = min(
                    run_timer.get_timeout(),
                    self._sleep_blocker.get_min_timeout(now),
                    self._timeout_after_blocker.get_min_timeout(now),
                    key=timers.timeout_to_key,
                )
                can_read, can_write = self._poller.poll(poll_timeout)
                for fd in can_read:
                    if self._nudger.is_nudged(fd):
                        self._nudger.ack()
                    else:
                        self._trap_return(self._read_blocker, fd)
                for fd in can_write:
                    self._trap_return(self._write_blocker, fd)
                # Handle any task timeout.
                now = time.monotonic()
                self._trap_return(self._sleep_blocker, now)
                self._timeout_after_on_completion(now)
            # Break if ``run`` times out.
            if run_timer.is_expired():
                raise errors.KernelTimeout
    def _run_one_ready_task(self):
        """Tick one ready task; return it if it completed, else None."""
        task, trap_result, trap_exception = self._ready_tasks.popleft()
        # A pending disrupter exception overrides the trap result.
        override = self._to_raise.pop(task, None)
        if override is not None:
            trap_result = None
            trap_exception = override
        self._current_task = task
        try:
            trap = task.tick(trap_result, trap_exception)
        finally:
            self._current_task = None
        if trap is None:
            # The task ran to completion: wake its joiners and drop all
            # of its pending disrupters/timeouts.
            ASSERT.true(task.is_completed())
            self._trap_return(self._task_completion_blocker, task)
            # Clear disrupter.
            self._to_raise.pop(task, None)
            self._timeout_after_blocker.cancel(task)
            self._num_tasks -= 1
            return task
        ASSERT.false(task.is_completed())
        # A disrupter may have been set while the task was ticking; if
        # so, re-queue it immediately instead of blocking it.
        override = self._to_raise.pop(task, None)
        if override:
            self._ready_tasks.append(TaskReady(task, None, override))
        else:
            handler = self._blocking_trap_handlers[trap.kind]
            try:
                handler(task, trap)
            except Exception as exc:
                # Deliver handler errors back into the task.
                self._ready_tasks.append(TaskReady(task, None, exc))
        return None
    #
    # Async generator management.
    #
    @contextlib.contextmanager
    def _managing_async_generators(self):
        # Install our asyncgen hooks while ticking tasks so that async
        # generators they start are tracked and finalized by us.
        original_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(
            firstiter=self._async_generator_firstiter_hook,
            finalizer=self._async_generator_finalizer_hook,
        )
        try:
            yield
        finally:
            sys.set_asyncgen_hooks(*original_hooks)
    def _async_generator_firstiter_hook(self, async_generator):
        self._async_generators.add(async_generator)
    def _async_generator_finalizer_hook(self, async_generator):
        self._async_generators.discard(async_generator)
        self._close_async_generator(async_generator)
    @staticmethod
    def _close_async_generator(async_generator):
        # Drive aclose() one step synchronously to run any cleanup code.
        closer = async_generator.aclose()
        try:
            closer.send(None)
        except RuntimeError as exc:
            LOG.warning('%s: %r', exc, async_generator)
        except StopIteration:
            pass
        finally:
            closer.close()
    #
    # Blocking traps.
    #
    def _block(self, task, trap):
        ASSERT.is_(trap.kind, traps.Traps.BLOCK)
        self._generic_blocker.block(trap.source, task)
        if trap.post_block_callback:
            trap.post_block_callback()
    def _join(self, task, trap):
        ASSERT.is_(trap.kind, traps.Traps.JOIN)
        ASSERT.is_(trap.task._kernel, self)
        ASSERT.is_not(trap.task, task)  # You can't join yourself.
        if trap.task.is_completed():
            # Nothing to wait for; resume the joiner right away.
            self._ready_tasks.append(TaskReady(task, None, None))
        else:
            self._task_completion_blocker.block(trap.task, task)
    def _poll(self, task, trap):
        ASSERT.is_(trap.kind, traps.Traps.POLL)
        if trap.events is pollers.Polls.READ:
            self._read_blocker.block(trap.fd, task)
        else:
            ASSERT.is_(trap.events, pollers.Polls.WRITE)
            self._write_blocker.block(trap.fd, task)
    def _sleep(self, task, trap):
        ASSERT.is_(trap.kind, traps.Traps.SLEEP)
        if trap.duration is None:
            # Sleep forever (until disrupted).
            self._forever_blocker.block(None, task)
        elif trap.duration <= 0:
            self._ready_tasks.append(TaskReady(task, None, None))
        else:
            self._sleep_blocker.block(time.monotonic() + trap.duration, task)
    #
    # Non-blocking traps.
    #
    def get_current_task(self):
        """Return the task currently being ticked, if any."""
        return self._current_task
    def get_all_tasks(self):
        """Return a list of all tasks (useful for debugging)."""
        self._assert_owner()
        all_tasks = []
        if self._current_task:
            all_tasks.append(self._current_task)
        all_tasks.extend(task_ready.task for task_ready in self._ready_tasks)
        for task_collection in (
            self._task_completion_blocker,
            self._read_blocker,
            self._write_blocker,
            self._sleep_blocker,
            self._generic_blocker,
            self._forever_blocker,
        ):
            all_tasks.extend(task_collection)
        ASSERT.equal(len(all_tasks), self._num_tasks)
        return all_tasks
    def spawn(self, awaitable):
        """Spawn a new task onto the kernel."""
        ASSERT.false(self._closed)
        self._assert_owner()
        # Accept a coroutine, an awaitable, or a callable returning one.
        if tasks.Task.is_coroutine(awaitable):
            coroutine = awaitable
        elif inspect.isawaitable(awaitable):
            coroutine = awaitable.__await__()
        else:
            coroutine = awaitable()
        task = tasks.Task(self, coroutine)
        self._ready_tasks.append(TaskReady(task, None, None))
        self._num_tasks += 1
        return task
    def notify_open(self, fd):
        """Register ``fd`` with the poller."""
        ASSERT.false(self._closed)
        self._assert_owner()
        self._poller.notify_open(fd)
    def unblock(self, source):
        """Unblock tasks blocked by ``source``."""
        ASSERT.false(self._closed)
        self._assert_owner()
        self._trap_return(self._generic_blocker, source)
    def cancel(self, task):
        """Cancel the task.

        This is a no-op if the task has been completed.
        """
        ASSERT.false(self._closed)
        self._assert_owner()
        ASSERT.is_(task._kernel, self)
        if not task.is_completed():
            self._disrupt(task, errors.TaskCancellation)
    def timeout_after(self, task, duration):
        """Schedule ``Timeout`` for ``task``; return a cancel callable."""
        ASSERT.false(self._closed)
        self._assert_owner()
        ASSERT.is_(task._kernel, self)
        if duration is None:
            return lambda: None
        # Even if duration <= 0, the kernel should raise ``Timeout`` at
        # the next blocking trap for consistency (so, don't raise here).
        self._timeout_after_blocker.block(time.monotonic() + duration, task)
        return functools.partial(self._timeout_after_blocker.cancel, task)
    def _timeout_after_on_completion(self, now):
        for task in self._timeout_after_blocker.unblock(now):
            self._disrupt(task, errors.Timeout)
    #
    # Multi-threading interface.
    #
    def post_callback(self, callback):
        """Schedule ``callback`` to run on the next tick (thread-safe)."""
        ASSERT.false(self._closed)
        with self._callbacks_lock:
            self._callbacks.append(callback)
        # Wake up the kernel in case it is blocked in poll().
        self._nudger.nudge()
    def notify_close(self, fd):
        """Tell the poller that ``fd`` was closed (thread-safe)."""
        ASSERT.false(self._closed)
        self._poller.notify_close(fd)
        if not self._is_owner():
            self._nudger.nudge()
    #
    # Internal helpers.
    #
    def _disrupt(self, task, exc):
        """Raise ``exc`` in, and maybe unblock, the given ``task``."""
        # NOTE: This method has to check **all** blockers to unblock the
        # given ``task``.
        self._to_raise[task] = exc
        for blocker in (self._read_blocker, self._write_blocker):
            fd = blocker.cancel(task)
            if fd is not None:
                # We do not have to unregister fd here because we are
                # using edge-trigger.
                self._ready_tasks.append(TaskReady(task, None, None))
                return
        is_unblocked = (
            self._task_completion_blocker.cancel(task)
            or self._sleep_blocker.cancel(task)
            or self._generic_blocker.cancel(task)
            or self._forever_blocker.cancel(task)
        )
        if is_unblocked:
            self._ready_tasks.append(TaskReady(task, None, None))
            return
    def _trap_return(self, blocker, source):
        # Move every task unblocked by ``source`` back to the ready queue.
        for task in blocker.unblock(source):
            self._ready_tasks.append(TaskReady(task, None, None))
class Nudger:
    """Self-pipe used to wake up a kernel blocked in poll() from another
    thread."""

    def __init__(self):
        # A non-blocking pipe pair (the classic self-pipe trick).
        # Or should we use (Linux-specific) eventfd?
        read_end, write_end = os.pipe()
        for fd in (read_end, write_end):
            os.set_blocking(fd, False)
        self._r = read_end
        self._w = write_end

    def register_to(self, poller):
        """Make ``poller`` watch the read end of the pipe."""
        poller.notify_open(self._r)
        # NOTE: ``close`` below skips ``notify_close`` because the
        # Nudger is only closed while the Kernel itself is closing.

    def nudge(self):
        """Write one wakeup byte; ignore a full pipe."""
        try:
            os.write(self._w, b'\x00')
        except BlockingIOError:
            pass

    def is_nudged(self, fd):
        return fd == self._r

    def ack(self):
        """Drain all pending wakeup bytes from the pipe."""
        try:
            while os.read(self._r, 4096):
                pass
        except BlockingIOError:
            pass

    def close(self):
        os.close(self._r)
        os.close(self._w)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/kernels/g1/asyncs/kernels/kernels.py",
"copies": "1",
"size": "16166",
"license": "mit",
"hash": -373675982654959300,
"line_mean": 30.4513618677,
"line_max": 77,
"alpha_frac": 0.5575281455,
"autogenerated": false,
"ratio": 3.9778543307086616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035382476208662,
"avg_score": null,
"num_lines": null
} |
__all__ = [
    'KernelTimeout',
    'call_with_kernel',
    'get_kernel',
    'run',
    'with_kernel',
]
import contextlib
import contextvars
import functools
import logging
from . import contexts
from . import kernels
# Re-export errors.
from .errors import KernelTimeout
# Library convention: install a NullHandler so applications that import
# this package without configuring logging see no "no handler" warnings.
logging.getLogger(__name__).addHandler(logging.NullHandler())
def with_kernel(func):
    """Decorator form of ``call_with_kernel``.

    The decorated ``func`` is always executed within a kernel context.
    """
    @functools.wraps(func)
    def kernel_entry(*args, **kwargs):
        return call_with_kernel(func, *args, **kwargs)
    return kernel_entry
def call_with_kernel(func, *args, **kwargs):
    """Call ``func`` within a kernel context.

    The kernel object is closed on return.
    """
    def run_in_context():
        # Do not create nested kernels: at most one global kernel object
        # per thread; when one already exists, just call through.
        if contexts.get_kernel(None) is not None:
            return func(*args, **kwargs)
        kernel = kernels.Kernel()
        contexts.set_kernel(kernel)
        with contextlib.closing(kernel):
            return func(*args, **kwargs)
    # Run inside a copied contextvars context so the kernel binding does
    # not leak to the caller.
    return contextvars.copy_context().run(run_in_context)
def run(awaitable=None, timeout=None):
    # Convenience wrapper: delegate to the context's kernel (Kernel.run).
    return contexts.get_kernel().run(awaitable, timeout)
def get_kernel():
    # Return the context's kernel; the ``None`` argument is the fallback
    # default when no kernel has been set.
    return contexts.get_kernel(None)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/kernels/g1/asyncs/kernels/__init__.py",
"copies": "1",
"size": "1425",
"license": "mit",
"hash": 5452541043687833000,
"line_mean": 22.75,
"line_max": 70,
"alpha_frac": 0.6421052632,
"autogenerated": false,
"ratio": 3.969359331476323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 60
} |
__all__ = [
    'keys',
]
from pathlib import Path
import logging
from garage import apps
from garage import scripts
LOG = logging.getLogger(__name__)
# (algorithm, key size) pairs for the SSH host keys to generate; when the
# size is None, the ``-b`` flag is omitted below and ssh-keygen picks it.
HOST_KEYS = [
    ('dsa', 1024),
    ('ecdsa', 521),
    ('ed25519', None),
    ('rsa', 4096),
]
# ECDSA requires less bits than RSA at same level of strength and
# thus seems to be the best choice
USER_KEY_ALGORITHM = 'ecdsa'
USER_KEY_SIZE = 521
def ssh_host_key_filename(algorithm):
    """Return the conventional host-key file name for *algorithm*."""
    return 'ssh_host_{}_key'.format(algorithm)
def ssh_user_key_filename(algorithm):
    """Return the conventional user key-pair base name for *algorithm*."""
    return ''.join(('id_', algorithm))
@apps.with_prog('gen-host-key')
@apps.with_help('generate host keys')
@apps.with_argument('output_dir', type=Path, help='set output directory')
def generate_host_key(args):
    """Generate SSH host keys with ssh-keygen."""
    key_paths = [
        args.output_dir / ssh_host_key_filename(algorithm)
        for algorithm, _ in HOST_KEYS
    ]
    # Refuse to clobber any existing key, but report every conflict
    # before giving up.
    okay = True
    for key_path in key_paths:
        if key_path.exists():
            LOG.error('attempt to overwrite %s', key_path)
            okay = False
    if not okay:
        return 1
    scripts.mkdir(args.output_dir)
    for (algorithm, key_size), key_path in zip(HOST_KEYS, key_paths):
        cmd = [
            'ssh-keygen',
            '-t', algorithm,
            '-N', '',  # No password
            '-C', 'root@localhost',
            '-f', key_path,
        ]
        if key_size:
            # NOTE(review): key_size is an int and key_path a Path --
            # assumes scripts.execute stringifies command arguments;
            # confirm.
            cmd.extend(['-b', key_size])
        scripts.execute(cmd)
    return 0
@apps.with_prog('gen-user-key')
@apps.with_help('generate user key pair')
@apps.with_argument('output_dir', type=Path, help='set output directory')
def generate_user_key(args):
    """Generate SSH key pair with ssh-keygen."""
    key_path = args.output_dir / ssh_user_key_filename(USER_KEY_ALGORITHM)
    # Refuse to clobber an existing key.
    if key_path.exists():
        LOG.error('attempt to overwrite %s', key_path)
        return 1
    scripts.mkdir(args.output_dir)
    # NOTE(review): unlike generate_host_key, no '-N' option is passed,
    # so ssh-keygen may prompt for a passphrase -- confirm intended.
    scripts.execute([
        'ssh-keygen',
        '-t', USER_KEY_ALGORITHM,
        '-b', USER_KEY_SIZE,
        '-C', 'plumber@localhost',
        '-f', key_path,
    ])
    return 0
@apps.with_help('manage security keys')
@apps.with_apps(
    'operation', 'operation on keys',
    generate_host_key,
    generate_user_key,
)
def keys(args):
    """Manage security keys."""
    # Dispatch to the sub-command selected on the command line.
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/mob/keys.py",
"copies": "1",
"size": "2338",
"license": "mit",
"hash": -8494565224786233000,
"line_mean": 22.6161616162,
"line_max": 74,
"alpha_frac": 0.5988023952,
"autogenerated": false,
"ratio": 3.344778254649499,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4443580649849499,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"kill_qthread",
"QT",
"QTKiller"
]
import time
from PySide import QtCore
def kill_qthread(t):
    """Forcefully terminate the given QThread; no-op for a falsy handle."""
    if t:
        t.terminate()
        # t.wait()
class QT(QtCore.QThread):
    """QThread that runs an arbitrary callable and stores its return value.

    Emits the ``thread_finished()`` signal after the callable returns.
    """
    def __init__(self, func, *args, **kwargs):
        QtCore.QThread.__init__(self)
        self._func = func
        self._args = args
        self._kwargs = kwargs
        self._return = None
    def run(self):
        # Executed by Qt in the new thread.
        self._return = self._func(*self._args, **self._kwargs)
        self.emit(QtCore.SIGNAL("thread_finished()"))
    def get_return(self):
        # Only meaningful once the thread has finished.
        return self._return
class QTKiller(QtCore.QThread):
    """Watchdog thread for a target QThread.

    Emits ``thread_running()`` once per second while waiting, then
    ``kill_qthread()`` after *timeout* seconds, and finally polls until
    the target reports finished (the ``kill_qthread()`` receiver is
    presumably expected to terminate the target -- confirm wiring).
    """
    def __init__(self, target_t, timeout = 10):
        QtCore.QThread.__init__(self)
        self._target_t = target_t
        self._timeout = timeout
    def run(self):
        i = 0
        while i < self._timeout:
            time.sleep(1)
            self.emit(QtCore.SIGNAL("thread_running()"))
            i += 1
        self.emit(QtCore.SIGNAL("kill_qthread()"))
        # Busy-wait (100ms steps) until the target actually stops.
        while not self._target_t.isFinished():
            time.sleep(0.1)
| {
"repo_name": "alexlib/Qt-Python-Binding-Examples",
"path": "qcommons/qthreadutils.py",
"copies": "2",
"size": "1085",
"license": "bsd-3-clause",
"hash": -5534238519669601000,
"line_mean": 20.2745098039,
"line_max": 62,
"alpha_frac": 0.5391705069,
"autogenerated": false,
"ratio": 3.522727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008169934640522875,
"num_lines": 51
} |
__all__ = ['NList']
__doc__ = """This module provides class :class:`NList`, a multidimensional list.
Indexes and shapes used with NList must be tuples.
Example:
::
l = nlist.NList(shape=(2, 3))
l[1, 2] = 42
NList's shape can be an empty tuple meaning a zero-dimensional list that has
one element with index ().
NList converts to False only if its :attr:`size` is 0, meaning that
at least one of its dimensions is 0. Note that the :attr:`size` of a
zero-dimensional NList is 1.
An NList equals another NList if their shapes and all their elements are equal.
NList is an iterable of all its elements.
Whenever an ordering of indexes is implied,
standard tuple comparison semantics are used.
"""
import operator
import itertools
from itertools import islice
from collections.abc import Container, Iterable, Sequence
from functools import reduce
def product(l):
    """Multiply together all elements of *l* (1 for an empty iterable)."""
    result = 1
    for factor in l:
        result *= factor
    return result
def group_every_n(l, n):
    """Yield consecutive chunks of *l* as lists of *n* elements.

    The final chunk may be shorter; nothing is yielded for an empty
    iterable (or when n == 0).

    This consumes a single shared iterator instead of the previous
    repeated ``itertools.tee``, which stacked an ever-deeper chain of
    islices (quadratic re-iteration) while producing the same chunks.
    """
    it = iter(l)
    while True:
        group = list(islice(it, n))
        if not group:
            break
        yield group
class NList:
    """A multidimensional list addressed by tuple indexes.

    :param other: Either another NList or a nested sequence to copy data from.
        For instance, if other is [[1, 2, 3], [4, 5, 6]], a 2x3 NList will be
        created with this data.
    :param tuple shape: A tuple of dimension sizes. E.g. (2, 3) for 2x3 NList.
    :param default: A value to fill the NList with when `shape` is passed.

    `other` and `shape`/`default` arguments are mutually exclusive.
    """

    def __init__(self, other=None, shape=None, default=None):
        if other is not None:
            if shape is not None or default is not None:
                raise RuntimeError(
                    "'other' and 'shape'/'default' arguments are mutually exclusive"
                )
            if isinstance(other, NList):
                self._init_from_nlist(other)
            elif isinstance(other, Sequence):
                self._init_from_nested(other)
            else:
                raise TypeError("'other' must be either NList or a Sequence")
        else:
            if shape is None:
                shape = ()
            self._init_from_shape(shape, default)

    def _init_from_nlist(self, other):
        # Shallow copy: elements are shared, the backing list is not.
        self._data = other._data.copy()
        self._shape = other.shape
        self._strides = other._strides

    def _init_from_nested(self, other):
        shape = [len(other)]
        values = other
        while True:
            # Descend while every element is a sequence and all elements
            # have the same length.  str/bytes are treated as scalars:
            # a length-1 string is a sequence of itself, which previously
            # made this loop grow `shape` forever on string input.
            if (all(isinstance(x, Sequence) and
                    not isinstance(x, (str, bytes)) for x in values) and
                    len({len(x) for x in values}) == 1):
                shape.append(len(values[0]))
                values = list(itertools.chain(*values))
            else:
                break
        self._shape = tuple(shape)
        self._build_strides()
        # Copy defensively: for a flat `other`, `values` still aliases the
        # caller's sequence, so mutations would otherwise be shared (and a
        # tuple input would make __setitem__ fail).
        self._data = list(values)

    def _init_from_shape(self, shape, default):
        self._check_shape(shape)
        # Normalize so that `shape` is always a tuple, as documented.
        self._shape = tuple(shape)
        self._build_strides()
        self._data = [default] * self.size

    def _build_strides(self):
        # Row-major (C order) strides: stride of axis k is the product of
        # the sizes of all later axes.
        self._strides = tuple(
            product(self.shape[j] for j in range(k + 1, self.rank))
            for k in range(self.rank)
        )

    @property
    def shape(self):
        """A tuple with the NList's dimensions. Read-only."""
        return self._shape

    @property
    def rank(self):
        """Number of the NList's dimensions. Read-only."""
        return len(self.shape)

    @property
    def size(self):
        """Number of elements in the NList. Read-only."""
        return product(self.shape)

    def __bool__(self):
        return self.size != 0

    def __eq__(self, other):
        return (
            isinstance(other, NList) and
            self.shape == other.shape and
            self._data == other._data
        )

    def __getitem__(self, key):
        return self._data[self._index_to_flat(key)]

    def __setitem__(self, key, value):
        self._data[self._index_to_flat(key)] = value

    def __iter__(self):
        return iter(self._data)

    def __repr__(self):
        nested = self._to_nested()
        return 'NList(%s, shape=%s)' % (nested, self.shape)

    def __str__(self):
        return repr(self)

    def copy(self):
        """Returns a shallow copy of the NList.

        :rtype: NList
        """
        return type(self)(other=self)

    def count(self, value):
        """Returns the number of occurrences of `value` in the NList.

        :rtype: int
        """
        return self._data.count(value)

    def keys(self, start=None, stop=None):
        """Returns an iterable of all indexes valid for the NList.

        :param tuple start: An index to start iteration from.
        :param tuple stop: An index before which to stop iteration.

        `start` and `stop` must be valid indexes for the NList, or `None`.
        """
        if start is not None:
            self._check_index(start)
        else:
            start = (0,) * self.rank
        if stop is not None:
            self._check_index(stop)
        current = start
        while self._index_in_range(current, stop):
            yield current
            current = self._next_index(current)

    def _index_in_range(self, index, stop):
        return (
            index is not None and
            self._in_bounds(index) and
            (stop is None or index < stop)
        )

    def _next_index(self, index):
        # Odometer-style increment: bump the last axis, carrying into
        # earlier axes; None signals that iteration wrapped around.
        current = list(index)
        i = self.rank - 1
        while i >= 0:
            current[i] += 1
            if current[i] >= self.shape[i]:
                current[i] = 0
                i -= 1
            else:
                return tuple(current)
        else:
            return None

    def enumerate(self):
        """Return an iterable of all pairs (index, value) in the NList."""
        for key in self.keys():
            yield (key, self[key])

    def index(self, value, start=None, stop=None):
        """Returns index of the first occurrence of `value` in the NList.

        :param value: A value to search for.
        :param tuple start: An index to start the search from.
        :param tuple stop: An index before which to stop search.
        :raises ValueError: If the value is not found.
        :rtype: tuple

        `start` and `stop` must be valid indexes for the NList, or `None`.
        """
        for key in self.keys(start, stop):
            if self[key] == value:
                return key
        raise ValueError('%s is not in the NList' % value)

    def _to_nested(self):
        # Convert the flat storage back into nested lists for repr().
        if self.rank == 0:
            return self._data[0]
        if self.size == 0:
            return []
        nested = self._data
        for dim in reversed(self.shape[1:]):
            nested = group_every_n(nested, dim)
        return list(nested)

    def _check_index(self, index):
        if not isinstance(index, tuple):
            raise TypeError('NList index must be a tuple')
        if len(index) != self.rank:
            raise TypeError('NList index must be rank %s' % self.rank)
        if any(not isinstance(x, int) for x in index):
            raise TypeError('Indexes must consist of integers')
        if not self._in_bounds(index):
            raise IndexError('NList index out of range')

    def _in_bounds(self, index):
        for i, x in enumerate(index):
            if not 0 <= x < self.shape[i]:
                return False
        return True

    def _index_to_flat(self, index):
        self._check_index(index)
        return sum(self._strides[k] * index[k] for k in range(self.rank))

    @staticmethod
    def _check_shape(shape):
        for x in shape:
            if not isinstance(x, int):
                raise TypeError('Dimensions must be integers')
            if x < 0:
                raise ValueError('Dimensions cannot be negative')
# Register NList as a virtual subclass of the container ABCs so that
# isinstance(x, Container) / isinstance(x, Iterable) checks succeed.
Container.register(NList)
Iterable.register(NList)
| {
"repo_name": "swarmer/nlist",
"path": "nlist.py",
"copies": "1",
"size": "8031",
"license": "mit",
"hash": -68766892974057380,
"line_mean": 29.3056603774,
"line_max": 84,
"alpha_frac": 0.5697920558,
"autogenerated": false,
"ratio": 4.013493253373313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083285309173313,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'localvms',
]
from pathlib import Path
import logging
import re
import time
from garage import apps
from garage import scripts
LOG = logging.getLogger(__name__)
@apps.with_help('create virtual machine')
@apps.with_argument(
    '--virtual-machines-dir', required=True, type=Path,
    help='provide path to VirtualBox virtual machines directory',
)
@apps.with_argument(
    '--name', required=True,
    help='set virtual machine name',
)
@apps.with_argument(
    '--image', required=True, type=Path,
    help='''provide path to virtual machine image to copy from (probably
    in VMDK format)
    ''',
)
@apps.with_argument(
    '--seed', required=True, type=Path,
    help='provide path to ISO image of cloud-init user data',
)
@apps.with_argument(
    '--ip-address', required=True,
    help='''provide the IP address of host-only interface (this should
    match the IP address recorded in the seed)
    ''',
)
def create(args):
    """Create virtual machine and initialize it with cloud-init.

    Workflow: clone the source image to VDI, create IDE/SATA storage
    controllers, attach the image and the cloud-init seed ISO, take a
    'created' snapshot, boot headless so cloud-init can run, power the
    VM off again and, on success, take an 'initialized' snapshot.

    Returns 0 on success and 1 when the target image already exists or
    the VM fails to bootstrap / power off in time.
    """
    scripts.ensure_file(args.image)
    scripts.ensure_file(args.seed)
    # We need VDI image because when VirtualBox boots on an VMDK image,
    # Linux kernel will report **lots** of ATA error and remount root
    # file system in read-only mode. (I don't know why, but I hope this
    # will be fixed in future version of VirtualBox.)
    image_path = (
        args.virtual_machines_dir / args.name /
        args.image.with_suffix('.vdi').name
    )
    if image_path.exists():
        LOG.error('attempt to overwrite %s', image_path)
        return 1
    LOG.info('create virtual machine')
    scripts.execute([
        'vboxmanage', 'createvm',
        '--name', args.name,
        '--ostype', 'Ubuntu_64',
        '--register',
    ])
    scripts.execute([
        'vboxmanage', 'modifyvm', args.name,
        '--memory', '1024',
        '--boot1', 'disk',
        '--nic1', 'nat',
        # Enable host-only network
        '--nic2', 'hostonly',
        '--hostonlyadapter2', 'vboxnet0',
        # Enable COM1, otherwise Linux kernel will be stuck at:
        #   random: nonblocking pool is initialized
        # (I hope this is fixed in future version of VirtualBox.)
        '--uart1', '0x3f8', '4',
        '--uartmode1', 'disconnected',
    ])
    # Add IDE for the seed image
    scripts.execute([
        'vboxmanage', 'storagectl', args.name,
        '--name', 'IDE',
        '--add', 'ide',
    ])
    # Add SATA for the virtual machine image
    scripts.execute([
        'vboxmanage', 'storagectl', args.name,
        '--name', 'SATA',
        '--add', 'sata',
    ])
    LOG.info('copy virtual machine image')
    scripts.execute([
        'vboxmanage', 'clonemedium', 'disk', args.image, image_path,
        '--format', 'VDI',
    ])
    # Grow the cloned disk; the value is in megabytes (25 GiB).
    scripts.execute([
        'vboxmanage', 'modifyhd', image_path, '--resize', '25600',
    ])
    scripts.execute([
        'vboxmanage', 'storageattach', args.name,
        '--storagectl', 'SATA',
        '--port', '0',
        '--device', '0',
        '--type', 'hdd',
        '--medium', image_path,
    ])
    LOG.info('attach seed image')
    # NOTE: It looks like if you remove the seed image, cloud-init will
    # not function properly and you will not be able to login. (This is
    # slightly annoying that you cannot later remove the seed image
    # because it is now part of the snapshot.)
    scripts.execute([
        'vboxmanage', 'storageattach', args.name,
        '--storagectl', 'IDE',
        '--port', '0',
        '--device', '0',
        '--type', 'dvddrive',
        '--medium', args.seed,
    ])
    scripts.execute([
        'vboxmanage', 'snapshot', args.name, 'take', 'created',
    ])
    LOG.info('initialize virtual machine')
    okay = True
    scripts.execute([
        'vboxmanage', 'startvm', args.name, '--type', 'headless',
    ])
    if not wait_for_vm_bootstrapping(args.name, args.ip_address):
        okay = False
    # ACPI power button lets the guest shut down cleanly.
    scripts.execute([
        'vboxmanage', 'controlvm', args.name, 'acpipowerbutton',
    ])
    if not wait_for_vm_poweroff(args.name):
        okay = False
    if okay:
        scripts.execute([
            'vboxmanage', 'snapshot', args.name, 'take', 'initialized',
        ])
    return 0 if okay else 1
def wait_for_vm_bootstrapping(name, ip_address):
    """Poll the VM with single pings until it answers.

    Returns True as soon as one ping succeeds, False after 60 failed
    attempts.
    """
    LOG.info('wait for virtual machine bootstrapping')
    # BUG FIX: the ping count must be a string -- subprocess command
    # lists reject int arguments ('1' was an int here).
    cmd = ['ping', '-c', '1', ip_address]
    # Err out after 60 failed pings
    # NOTE(review): pacing relies on ping's own reply timeout; consider
    # adding '-W 1' plus an explicit sleep -- verify on target platform.
    for _ in range(60):
        if scripts.execute(cmd, check=False).returncode == 0:
            return True
    LOG.error('virtual machine %s is not responding to ping', name)
    return False
def wait_for_vm_poweroff(name):
    """Poll `vboxmanage showvminfo` until the VM reports powered off.

    Returns True when the powered-off state is observed, False after 60
    one-second polls.
    """
    LOG.info('wait for virtual machine powering off')
    # Could we not be polling vm state?
    state_pattern = re.compile(br'State:\s*powered off')
    showvminfo = ['vboxmanage', 'showvminfo', name]
    attempts = 0
    while attempts < 60:
        output = scripts.execute(showvminfo, capture_stdout=True).stdout
        if state_pattern.search(output):
            return True
        time.sleep(1)
        attempts += 1
    LOG.error('virtual machine %s is not powered off', name)
    return False
@apps.with_prog('local-vms')
@apps.with_help('manage local virtual machines')
@apps.with_apps(
    'operation', 'operation on virtual machines',
    create,
)
def localvms(args):
    """Manage local virtual machines.

    Dispatches to the sub-command selected on the command line
    (currently only `create`) and returns its exit status.
    """
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/mob/localvms.py",
"copies": "1",
"size": "5386",
"license": "mit",
"hash": 1821008466074332700,
"line_mean": 28.4316939891,
"line_max": 72,
"alpha_frac": 0.5997029335,
"autogenerated": false,
"ratio": 3.7325017325017327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48322046660017326,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"LoginFailure",
"QualysApi",
]
import requests
import xml.etree.ElementTree as ET
class LoginFailure(Exception): pass  # Raised by QualysApi.login() when the API rejects the credentials.
class QualysApi(object):
    """Thin session-based client for the Qualys API v2.

    Logs in on construction and logs out (best effort) on garbage
    collection.
    """

    API_ENDPOINT = "https://qualysapi.qualys.com/api/2.0/fo"

    def __init__(self, username, password, endpoint="qualysapi.qualys.com"):
        # Instance attribute shadows the class default so an alternate
        # API platform hostname can be used.
        self.API_ENDPOINT = "https://{0}/api/2.0/fo".format(endpoint)
        self.session = requests.Session()
        self.login(username, password)

    def _request(self, method, url, *args, **kwargs):
        # Shared plumbing for _post/_get: prefix the endpoint and add the
        # X-Requested-With header required by the Qualys API.
        headers = kwargs.pop("headers", {})
        headers.update({"X-Requested-With": "pyqualys 1.0"})
        url = "{0}/{1}".format(self.API_ENDPOINT, url)
        return method(url, *args, headers=headers, **kwargs)

    def _post(self, url, *args, **kwargs):
        return self._request(self.session.post, url, *args, **kwargs)

    def _get(self, url, *args, **kwargs):
        return self._request(self.session.get, url, *args, **kwargs)

    def __del__(self):
        # Best effort only: __del__ may run at interpreter shutdown or
        # after a failed login, when logging out can no longer succeed.
        try:
            self.logout()
        except Exception:
            pass

    def login(self, username, password):
        """Open an API session; raise LoginFailure on rejection."""
        payload = {"action": "login", "username": username, "password": password}
        r = self._post("/session/", data=payload)
        xml = ET.fromstring(r.text)
        if xml.tag == "SIMPLE_RETURN" and xml.find("./RESPONSE/CODE") is not None:
            # Failed login
            code = xml.find("./RESPONSE/CODE").text
            text = xml.find("./RESPONSE/TEXT").text
            raise LoginFailure("error_code={0}, error_text={1}".format(code, text))

    def logout(self):
        """Close the API session."""
        payload = {"action": "logout"}
        self._post("/session/", data=payload)

    @property
    def scans(self):
        """Iterable of SCAN elements from the scan list endpoint."""
        payload = {"action": "list"}
        r = self._get("/scan/", params=payload)
        xml = ET.fromstring(r.text)
        if hasattr(xml, "iterfind"):
            return xml.iterfind("./RESPONSE/SCAN_LIST/SCAN")
        else:
            return xml.findall("./RESPONSE/SCAN_LIST/SCAN")

    def get_scan(self, ref, full=False):
        """Fetch one scan result as JSON; `full` requests extended mode."""
        payload = {"action": "fetch", "scan_ref": ref, "output_format": "json"}
        if full:
            payload["mode"] = "extended"
        r = self._get("/scan/", params=payload)
        try:
            return r.json()
        except Exception:
            # Dump the raw body for debugging, then re-raise.  (Was a bare
            # `except:` with a Python-2-only print statement; print() with
            # one argument behaves identically on both Python versions.)
            print(r.text)
            raise
| {
"repo_name": "HurricaneLabs/TA-qualys",
"path": "bin/qualys/__init__.py",
"copies": "1",
"size": "2328",
"license": "mit",
"hash": -1852089004202463500,
"line_mean": 31.3333333333,
"line_max": 83,
"alpha_frac": 0.5704467354,
"autogenerated": false,
"ratio": 3.6661417322834646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.972602826768028,
"avg_score": 0.0021120400006370715,
"num_lines": 72
} |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from io import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """Tree builder that feeds markup to lxml's event-style XMLParser
    target API and relays the parser events to the soup object."""

    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    def default_parser(self, encoding):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        # Use the default parser.
        parser = self.default_parser(encoding)
        # BUG FIX: was isinstance(parser, collections.Callable), which
        # breaks on Python 3.10+ where the ABC aliases were removed from
        # `collections`; the callable() builtin is equivalent here.
        if callable(parser):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for parsing the document.
        """
        if isinstance(markup, str):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, str):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(markup, try_encodings, is_html)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)

    def feed(self, markup):
        """Feed markup to the parser in CHUNK_SIZE pieces."""
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, str):
            markup = StringIO(markup)

        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(str(e))

    def close(self):
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # (nsmap's mutable default is safe: the dict is never mutated.)
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        # (The redundant list() wrappers around .items() -- 2to3
        # artifacts -- were dropped; nothing is mutated while iterating.)
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn then into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None

    def end(self, name):
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder."""

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    def default_parser(self, encoding):
        # Returned as a class; parser_for() instantiates it.
        return etree.HTMLParser

    def feed(self, markup):
        encoding = self.soup.original_encoding
        try:
            parser = self.parser_for(encoding)
            self.parser = parser
            parser.feed(markup)
            parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><body>%s</body></html>' % fragment
| {
"repo_name": "bzhou26/Delaware-Crawler",
"path": "bs4/builder/_lxml.py",
"copies": "16",
"size": "8663",
"license": "mit",
"hash": 2495170085199681500,
"line_mean": 36.1802575107,
"line_max": 85,
"alpha_frac": 0.6108738312,
"autogenerated": false,
"ratio": 4.21352140077821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from argeweb.libs.bs4.element import Comment, Doctype, NamespacedAttribute
from argeweb.libs.bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from argeweb.libs.bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """Feed markup to lxml's event-style XMLParser target API and relay
    the parser events to the soup object.

    Python 2 variant (uses `unicode` and `except ..., e` syntax).
    """

    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    def default_parser(self, encoding):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        # Use the default parser.
        parser = self.default_parser(encoding)
        # NOTE(review): collections.Callable works on Python 2, but the
        # alias was removed from `collections` in Python 3.10.
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for parsing the document.
        """
        if isinstance(markup, unicode):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, unicode):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(markup, try_encodings, is_html)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)

    def feed(self, markup):
        # Feed markup to the parser in CHUNK_SIZE pieces.
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, unicode):
            markup = StringIO(markup)

        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))

    def close(self):
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn then into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None

    def end(self, name):
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml builder (Python 2 variant)."""

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    def default_parser(self, encoding):
        # Returned as a class; parser_for() instantiates it.
        return etree.HTMLParser

    def feed(self, markup):
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| {
"repo_name": "argeweb/start",
"path": "argeweb/libs/bs4/builder/_lxml.py",
"copies": "1",
"size": "8700",
"license": "mit",
"hash": 4322037359213379000,
"line_mean": 36.339055794,
"line_max": 82,
"alpha_frac": 0.6125287356,
"autogenerated": false,
"ratio": 4.206963249516441,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5319491985116441,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
@property
def default_parser(self):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
def __init__(self, parser=None, empty_element_tags=None):
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
if parser is None:
# Use the default parser.
parser = self.default_parser
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False)
self.parser = parser
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 3-tuple (markup, original encoding, encoding
declared within markup).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
self.parser.feed(data)
while data != '':
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if data != '':
self.parser.feed(data)
self.parser.close()
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn then into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
    """lxml target callback: a closing tag was encountered."""
    self.soup.endData()
    completed_tag = self.soup.tagStack[-1]
    namespace, name = self._getNsTag(name)
    # Resolve the prefix against the innermost mapping binding this
    # namespace, if the tag is namespaced at all.
    nsprefix = None
    if namespace is not None:
        for mapping in reversed(self.nsmaps):
            if mapping is not None and namespace in mapping:
                nsprefix = mapping[namespace]
                break
    self.soup.handle_endtag(name, nsprefix)
    if len(self.nsmaps) > 1:
        # This tag, or one of its parents, introduced a namespace
        # mapping, so pop it off the stack.
        self.nsmaps.pop()
def pi(self, target, data):
    """lxml target callback: processing instructions are discarded."""
    return None
def data(self, content):
    """lxml target callback: forward character data to the soup."""
    self.soup.handle_data(content)
def doctype(self, name, pubid, system):
    """lxml target callback: record the document's doctype in the soup."""
    # Flush any pending character data before inserting the node.
    self.soup.endData()
    self.soup.object_was_parsed(
        Doctype.for_name_and_ids(name, pubid, system))
def comment(self, content):
    """lxml target callback: represent a comment as a Comment object."""
    soup = self.soup
    soup.endData()
    soup.handle_data(content)
    soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder."""

    features = [LXML, HTML, FAST, PERMISSIVE]

    is_xml = False

    @property
    def default_parser(self):
        # Return the class; the caller instantiates it with default args.
        return etree.HTMLParser

    def feed(self, markup):
        """Parse the whole *markup* string in a single call."""
        parser = self.parser
        parser.feed(markup)
        parser.close()

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`: wrap *fragment* in an HTML skeleton."""
        return u'<html><body>%s</body></html>' % fragment
| {
"repo_name": "pedropramos/gerador-horarios",
"path": "bs4/builder/_lxml.py",
"copies": "14",
"size": "7078",
"license": "mit",
"hash": -6736482342923745000,
"line_mean": 34.567839196,
"line_max": 82,
"alpha_frac": 0.606103419,
"autogenerated": false,
"ratio": 4.112725159790819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the two lxml-backed tree builders.
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from ..element import Comment, Doctype, NamespacedAttribute
from ..builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from ..dammit import EncodingDetector
# Feature name under which these builders are registered.
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """XML tree builder backed by lxml's event-driven target API.

    An instance is handed to ``etree.XMLParser(target=...)``; lxml then
    calls ``start``/``end``/``data``/``doctype``/``comment``/``close``
    as it parses, and each callback forwards the event to ``self.soup``.

    NOTE(review): this module uses Python 2 constructs (``unicode``,
    ``except ..., e`` syntax) and will not run unmodified on Python 3.
    """

    # Parser class used when the caller does not supply one.
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    # Number of bytes/characters fed to lxml per feed() iteration.
    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    def default_parser(self, encoding):
        """Return the parser object (or class) to use for *encoding*."""
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        """Return a ready-to-use parser instance for *encoding*."""
        # Use the default parser.
        parser = self.default_parser(encoding)
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        # Stack of inverted ({uri: prefix}) namespace maps; one entry is
        # pushed per tag once any non-default namespace is in play.
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for parsing the document.
        """
        if isinstance(markup, unicode):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, unicode):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(markup, try_encodings, is_html)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)

    def feed(self, markup):
        """Feed *markup* (bytes, unicode, or file-like) to lxml in chunks."""
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, unicode):
            markup = StringIO(markup)

        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            # Python 2 except syntax. Signal that this builder cannot
            # handle the markup so the caller may try another strategy.
            raise ParserRejectedMarkup(str(e))

    def close(self):
        """lxml callback: parsing finished; reset the namespace stack."""
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        """lxml callback: an opening tag was encountered.

        The mutable default ``nsmap`` is safe here: it is only read.
        """
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            # NOTE(review): this branch also fires when *nsmap* is
            # non-empty, silently dropping namespaces declared on a
            # nested tag; upstream later changed the condition to
            # ``len(nsmap) == 0 and len(self.nsmaps) > 1`` -- confirm.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn them into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None

    def end(self, name):
        """lxml callback: a closing tag was encountered."""
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            # Resolve the prefix against the innermost active mapping.
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        """lxml callback: forward character data to the soup."""
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        """lxml callback: record the document's doctype."""
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder.

    NOTE(review): uses Python 2 ``except ..., e`` syntax.
    """

    features = [LXML, HTML, FAST, PERMISSIVE]

    is_xml = False

    def default_parser(self, encoding):
        """Return the lxml HTML parser class (instantiated by parser_for)."""
        # *encoding* is accepted for interface compatibility with
        # LXMLTreeBuilderForXML.default_parser; it is not used here.
        return etree.HTMLParser

    def feed(self, markup):
        """Parse the whole *markup* string with lxml's HTML parser."""
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            # Signal that this builder cannot handle the markup.
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| {
"repo_name": "yongshengwang/builthue",
"path": "desktop/core/ext-py/requests-2.0.0/requests/packages/bs4/builder/_lxml.py",
"copies": "2",
"size": "8655",
"license": "apache-2.0",
"hash": -2680636047397935600,
"line_mean": 36.1459227468,
"line_max": 82,
"alpha_frac": 0.6108607741,
"autogenerated": false,
"ratio": 4.234344422700587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5845205196800587,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the two lxml-backed tree builders.
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
]
from StringIO import StringIO
import collections
from lxml import etree
from alp.request.bs4.element import Comment, Doctype, NamespacedAttribute
from alp.request.bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from alp.request.bs4.dammit import UnicodeDammit
# Feature name under which these builders are registered.
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """XML tree builder backed by lxml's event-driven target API.

    An instance is handed to ``etree.XMLParser(target=...)``; lxml then
    invokes the callbacks below, which forward events to ``self.soup``.

    NOTE(review): this module uses Python 2 constructs (``unicode``,
    ``basestring``) and will not run unmodified on Python 3.
    """

    # Parser class used when the caller does not supply one.
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    # Number of characters fed to lxml per feed() iteration.
    CHUNK_SIZE = 512

    @property
    def default_parser(self):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        return etree.XMLParser(target=self, strip_cdata=False, recover=True)

    def __init__(self, parser=None, empty_element_tags=None):
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        if parser is None:
            # Use the default parser.
            parser = self.default_parser
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False)
        self.parser = parser
        self.soup = None
        # Stack of inverted ({uri: prefix}) namespace maps; stays None
        # until the first namespace declaration is seen.
        self.nsmaps = None

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding declared
         within markup, has undergone character replacement).
        """
        if isinstance(markup, unicode):
            return markup, None, None, False

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
        return (dammit.markup, dammit.original_encoding,
                dammit.declared_html_encoding,
                dammit.contains_replacement_characters)

    def feed(self, markup):
        """Feed *markup* (string or file-like) to lxml in chunks."""
        if isinstance(markup, basestring):
            markup = StringIO(markup)
        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        self.parser.feed(data)
        while data != '':
            # Now call feed() on the rest of the data, chunk by chunk.
            data = markup.read(self.CHUNK_SIZE)
            if data != '':
                self.parser.feed(data)
        self.parser.close()

    def close(self):
        """lxml callback: parsing finished; reset the namespace stack."""
        self.nsmaps = None

    def start(self, name, attrs, nsmap={}):
        """lxml callback: an opening tag was encountered.

        The mutable default ``nsmap`` is safe here: it is only read.
        """
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and self.nsmaps != None:
            # There are no new namespaces for this tag, but namespaces
            # are in play, so we need a separate tag stack to know
            # when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            if self.nsmaps is None:
                self.nsmaps = []
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        if self.nsmaps is not None and len(self.nsmaps) > 0:
            # Namespaces are in play. Find any attributes that came in
            # from lxml with namespaces attached to their names, and
            # turn them into NamespacedAttribute objects.
            new_attrs = {}
            for attr, value in attrs.items():
                namespace, attr = self._getNsTag(attr)
                if namespace is None:
                    new_attrs[attr] = value
                else:
                    nsprefix = self._prefix_for_namespace(namespace)
                    attr = NamespacedAttribute(nsprefix, attr, namespace)
                    new_attrs[attr] = value
            attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        # Falls through to an implicit None when no mapping binds it.

    def end(self, name):
        """lxml callback: a closing tag was encountered."""
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            # Resolve the prefix against the innermost active mapping.
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if self.nsmaps != None:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()
            if len(self.nsmaps) == 0:
                # Namespaces are no longer in play, so don't bother keeping
                # track of the namespace stack.
                self.nsmaps = None

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        """lxml callback: forward character data to the soup."""
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        """lxml callback: record the document's doctype."""
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder."""

    features = [LXML, HTML, FAST, PERMISSIVE]

    is_xml = False

    @property
    def default_parser(self):
        # Return the class; __init__ instantiates it with default args.
        return etree.HTMLParser

    def feed(self, markup):
        """Parse the whole *markup* string in a single call."""
        parser = self.parser
        parser.feed(markup)
        parser.close()

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`: wrap *fragment* in an HTML skeleton."""
        return u'<html><body>%s</body></html>' % fragment
| {
"repo_name": "bfontaine/alfred-pp",
"path": "alp/request/bs4/builder/_lxml.py",
"copies": "2",
"size": "7199",
"license": "mit",
"hash": -4165564893127535000,
"line_mean": 35.175879397,
"line_max": 82,
"alpha_frac": 0.5961939158,
"autogenerated": false,
"ratio": 4.156466512702078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5752660428502078,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the two lxml-backed tree builders.
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
]
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
# Feature name under which these builders are registered.
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """XML tree builder backed by lxml's event-driven target API.

    An instance is handed to ``etree.XMLParser(target=...)``; lxml then
    invokes the callbacks below, which forward events to ``self.soup``.
    """

    # Parser class used when the caller does not supply one.
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    @property
    def default_parser(self):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        return etree.XMLParser(target=self, strip_cdata=False, recover=True)

    def __init__(self, parser=None, empty_element_tags=None):
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        if parser is None:
            # Use the default parser.
            parser = self.default_parser
        # BUG FIX: this check used `isinstance(parser, collections.Callable)`,
        # a deprecated alias removed in Python 3.10; the builtin callable()
        # performs the equivalent test on all Python 3 versions.
        if callable(parser):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False)
        self.parser = parser
        self.soup = None
        # Stack of inverted ({uri: prefix}) namespace maps; stays None
        # until the first namespace declaration is seen.
        self.nsmaps = None

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding declared
         within markup, has undergone character replacement).
        """
        # DOC FIX: the original docstring claimed a 3-tuple; four values
        # are returned on both paths below.
        if isinstance(markup, str):
            return markup, None, None, False

        try_encodings = [user_specified_encoding, document_declared_encoding]
        # NOTE(review): is_html=True looks odd for an XML builder; kept
        # as-is to preserve behavior -- confirm against UnicodeDammit.
        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
        return (dammit.markup, dammit.original_encoding,
                dammit.declared_html_encoding,
                dammit.contains_replacement_characters)

    def feed(self, markup):
        """Feed the whole *markup* string to lxml and finish parsing."""
        self.parser.feed(markup)
        self.parser.close()

    def close(self):
        """lxml callback: parsing finished; reset the namespace stack."""
        self.nsmaps = None

    def start(self, name, attrs, nsmap={}):
        """lxml callback: an opening tag was encountered.

        The mutable default ``nsmap`` is safe here: it is only read.
        """
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and self.nsmaps != None:
            # There are no new namespaces for this tag, but namespaces
            # are in play, so we need a separate tag stack to know
            # when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            if self.nsmaps is None:
                self.nsmaps = []
            inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in list(nsmap.items()):
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace
        namespace, name = self._getNsTag(name)
        if namespace is not None:
            # Resolve the prefix against the innermost active mapping.
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def end(self, name):
        """lxml callback: a closing tag was encountered."""
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if self.nsmaps != None:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()
            if len(self.nsmaps) == 0:
                # Namespaces are no longer in play, so don't bother keeping
                # track of the namespace stack.
                self.nsmaps = None

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        """lxml callback: forward character data to the soup."""
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        """lxml callback: record the document's doctype."""
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder."""

    features = [LXML, HTML, FAST, PERMISSIVE]

    is_xml = False

    @property
    def default_parser(self):
        # Return the class; __init__ instantiates it with default args.
        return etree.HTMLParser

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`: wrap *fragment* in an HTML skeleton."""
        return '<html><body>%s</body></html>' % fragment
| {
"repo_name": "Zord13appdesa/python-for-android",
"path": "python3-alpha/extra_modules/bs4/builder/_lxml.py",
"copies": "46",
"size": "5603",
"license": "apache-2.0",
"hash": -2784184247092684000,
"line_mean": 34.2389937107,
"line_max": 85,
"alpha_frac": 0.5995002677,
"autogenerated": false,
"ratio": 4.083819241982507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the two lxml-backed tree builders.
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
]
import collections
from lxml import etree
from bs4.element import Comment, Doctype
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
# Feature name under which these builders are registered.
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """XML tree builder backed by lxml's event-driven target API.

    NOTE(review): Python 2 module (uses ``unicode``).
    """

    # Parser class used when the caller does not supply one.
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    @property
    def default_parser(self):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        return etree.XMLParser(target=self, strip_cdata=False, recover=True)

    def __init__(self, parser=None, empty_element_tags=None):
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        if parser is None:
            # Use the default parser.
            parser = self.default_parser
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False)
        self.parser = parser
        self.soup = None

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :return: A 3-tuple (markup, original encoding, encoding
         declared within markup).
        """
        if isinstance(markup, unicode):
            return markup, None, None

        try_encodings = [user_specified_encoding, document_declared_encoding]
        # NOTE(review): isHTML=True looks odd for an XML builder; kept
        # as-is to preserve behavior.
        dammit = UnicodeDammit(markup, try_encodings, isHTML=True)
        return (dammit.markup, dammit.original_encoding,
                dammit.declared_html_encoding)

    def feed(self, markup):
        """Feed the whole *markup* string to lxml and finish parsing."""
        self.parser.feed(markup)
        self.parser.close()

    def close(self):
        # Nothing to clean up; this builder keeps no per-document state.
        pass

    def start(self, name, attrs):
        """lxml callback: forward an opening tag to the soup."""
        self.soup.handle_starttag(name, attrs)

    def end(self, name):
        """lxml callback: forward a closing tag to the soup."""
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        self.soup.handle_endtag(name)

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        """lxml callback: forward character data to the soup."""
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        """lxml callback: record the document's doctype."""
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        # BUG FIX: the XML declaration was missing its closing '?' (it
        # read '...encoding="utf-8">'), producing a malformed declaration;
        # every other builder in this codebase emits '...?>'.
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder."""

    features = [LXML, HTML, FAST, PERMISSIVE]

    is_xml = False

    @property
    def default_parser(self):
        # Return the class; __init__ instantiates it with default args.
        return etree.HTMLParser

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`: wrap *fragment* in an HTML skeleton."""
        return u'<html><body>%s</body></html>' % fragment
| {
"repo_name": "colinmcd94/kickdata",
"path": "lib/bs4/oldbs4/builder/_lxml.py",
"copies": "2",
"size": "3169",
"license": "apache-2.0",
"hash": -4018932810000257000,
"line_mean": 28.3425925926,
"line_max": 77,
"alpha_frac": 0.6330072578,
"autogenerated": false,
"ratio": 3.907521578298397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002204585537918871,
"num_lines": 108
} |
# Public API of this module: the `repos` command-line entry point.
__all__ = [
    'main',
]
from g1 import scripts
from g1.bases import argparses
from g1.bases import oses
from g1.bases.assertions import ASSERT
from . import alerts
from . import bases
from . import envs
from . import pod_ops_dirs
from . import tokens
from . import xar_ops_dirs
@argparses.begin_parser(
    'init',
    **argparses.make_help_kwargs('initialize operations repository'),
)
@argparses.argument(
    '--bootstrap',
    action=argparses.StoreBoolAction,
    default=False,
    help='enable bootstrap mode (default: %(default_string)s)',
)
@argparses.end
def cmd_init(args):
    """Initialize the operations repository on this host."""
    oses.assert_root_privilege()
    # Pod/XAR dependencies are skipped in bootstrap mode; alert
    # dependencies (journalctl, tail) are always required.
    required = [] if args.bootstrap else ['ctr', 'systemctl', 'tar']
    required += ['journalctl', 'tail']
    for command in required:
        scripts.assert_command_exist(command)
    bases.make_dir(bases.get_repo_path(), parents=True)
    # Initialize each sub-system in dependency order.
    for module in (alerts, envs, pod_ops_dirs, xar_ops_dirs, tokens):
        module.init()
    return 0
@argparses.begin_parser(
    'cleanup',
    **argparses.make_help_kwargs('clean up operations repository'),
)
@argparses.end
def cmd_cleanup():
    """Clean up pod and XAR ops dirs, then drop stale token assignments."""
    oses.assert_root_privilege()
    pod_dirs = pod_ops_dirs.make_ops_dirs()
    pod_dirs.cleanup()
    xar_ops_dirs.make_ops_dirs().cleanup()
    # Token cleanup consults the pod ops dirs to find live pods.
    tokens.make_tokens_database().cleanup(pod_dirs)
    return 0
@argparses.begin_parser(
    'repos',
    **argparses.make_help_kwargs('manage operations repository'),
)
@argparses.begin_subparsers_for_subcmds(dest='command')
@argparses.include(cmd_init)
@argparses.include(cmd_cleanup)
@argparses.end
@argparses.end
def main(args):
    """Dispatch to the selected `repos` sub-command."""
    handlers = {
        'init': lambda: cmd_init(args),
        'cleanup': cmd_cleanup,
    }
    handler = handlers.get(args.command)
    if handler is None:
        return ASSERT.unreachable('unknown command: {}', args.command)
    return handler()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/repo_cmds.py",
"copies": "1",
"size": "1944",
"license": "mit",
"hash": -3153221123087140000,
"line_mean": 23,
"line_max": 70,
"alpha_frac": 0.6862139918,
"autogenerated": false,
"ratio": 3.261744966442953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4447958958242953,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the command-line entry point.
__all__ = [
    'main',
]
import argparse
import itertools
import logging
import sys
from collections import OrderedDict
import iga.context
import iga.fargparse
import iga.ninja
import iga.package
from iga.context import load_workspace
from iga.label import Label
from iga.ninja import NinjaRule
def parse_argv(argv):
    """Parse *argv* (argv[0] is the program name) into a Namespace."""
    parser = argparse.ArgumentParser(prog='iga', description='''
    iga meta-build system
    ''')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='verbose output')
    parser.add_argument('label', help='build target')
    return parser.parse_args(argv[1:])
def init(args):
    """Configure logging from the verbosity flag and register rule modules."""
    if args.verbose == 0:
        level, format = logging.WARNING, '%(levelname)s %(message)s'
    elif args.verbose == 1:
        level, format = logging.INFO, '%(levelname)s %(message)s'
    else:
        # -vv and beyond: full debug output with timestamps.
        level, format = (
            logging.DEBUG, '%(asctime)s %(levelname)s %(name)s: %(message)s')
    logging.basicConfig(level=level, format=format)
    iga.fargparse.Parser.register_parse_func(str, parse_string)
    # Register the built-in rule modules.
    from iga.rules import cc
    cc.init()
    from iga.rules import genrule
    genrule.init()
def parse_string(string):
    """Identity parser for str values; reject any other type."""
    if isinstance(string, str):
        return string
    raise iga.fargparse.ParseError()
def main(argv=None):
    """Entry point: resolve the target's rule graph and emit build.ninja."""
    args = parse_argv(argv or sys.argv)
    init(args)
    load_workspace()
    label = Label.parse_cmdline(args.label)
    rules = OrderedDict()
    ninja_rules = OrderedDict()
    # Breadth-first traversal of the dependency graph rooted at *label*.
    pending = [iga.package.get_rule(label)]
    while pending:
        rule = pending.pop(0)
        if rule.name in rules:
            continue  # already visited
        rules[rule.name] = rule
        for ninja_rule in rule.rule_type.ninja_rules:
            ninja_rules[ninja_rule] = NinjaRule.get_object(ninja_rule)
        pending.extend(generate_input_rules(rule))
    iga.context.current().update(
        outputs=iga.package.get_outputs(),
        _parsed=True,
    )
    with open('build.ninja', 'w') as ninja_file:
        iga.ninja.write_header_to(ninja_file)
        for ninja_rule in ninja_rules.values():
            ninja_rule.write_to(ninja_file)
        for rule in rules.values():
            # Each rule is written within its package's context.
            with iga.context.create() as cxt:
                cxt['package'] = rule.name.package
                rule.write_to(ninja_file)
    return 0
def generate_input_rules(rule):
    """Yield the rules that produce *rule*'s inputs, skipping unknown labels."""
    for labels in rule.inputs.values():
        for label in labels:
            dep = iga.package.get_rule(label, raises=False)
            if dep is not None:
                yield dep
| {
"repo_name": "clchiou/iga",
"path": "iga/main.py",
"copies": "1",
"size": "2554",
"license": "mit",
"hash": -7399041527092131000,
"line_mean": 24.54,
"line_max": 70,
"alpha_frac": 0.6280344558,
"autogenerated": false,
"ratio": 3.659025787965616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4787060243765616,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: the command-line entry point.
__all__ = [
    'main',
]
import logging
import sys
from g1.bases import argparses
from g1.bases import oses
from g1.bases.assertions import ASSERT
from g1.containers import models as ctr_models
from g1.texts import columns
from g1.texts.columns import argparses as columns_argparses
from . import models
from . import pod_ops_dirs
from . import tokens
# Module-level logger for the token sub-commands.
LOG = logging.getLogger(__name__)

# Columns available to `list-definitions` output.
_DEFINITION_LIST_COLUMNS = frozenset((
    'token-name',
    'range',
    'values',
))
# Columns shown when the user does not select any.
_DEFINITION_LIST_DEFAULT_COLUMNS = (
    'token-name',
    'range',
    'values',
)
# How non-string cell values are rendered as text.
_DEFINITION_LIST_STRINGIFIERS = {
    'range': lambda args: ' '.join(map(str, args)),
    'values': ' '.join,
}
# Sanity checks: defaults and stringifiers must reference known columns.
ASSERT.issuperset(_DEFINITION_LIST_COLUMNS, _DEFINITION_LIST_DEFAULT_COLUMNS)
ASSERT.issuperset(_DEFINITION_LIST_COLUMNS, _DEFINITION_LIST_STRINGIFIERS)
@argparses.begin_parser(
    'list-definitions', **argparses.make_help_kwargs('list token definitions')
)
@columns_argparses.columnar_arguments(
    _DEFINITION_LIST_COLUMNS, _DEFINITION_LIST_DEFAULT_COLUMNS
)
@argparses.end
def cmd_list_definitions(args):
    """Print every token definition as a sorted columnar table."""
    columnar = columns.Columnar(
        **columns_argparses.make_columnar_kwargs(args),
        stringifiers=_DEFINITION_LIST_STRINGIFIERS,
    )
    database = tokens.make_tokens_database().get()
    for token_name, definition in database.definitions.items():
        is_range = definition.kind == 'range'
        if not is_range:
            # Definitions are either 'range' or 'values'.
            ASSERT.equal(definition.kind, 'values')
        columnar.append({
            'token-name': token_name,
            'range': definition.args if is_range else (),
            'values': () if is_range else definition.args,
        })
    columnar.sort(lambda row: row['token-name'])
    columnar.output(sys.stdout)
    return 0
# Columns available to `list-assignments` output.
_ASSIGNMENT_LIST_COLUMNS = frozenset((
    'token-name',
    'pod-id',
    'name',
    'value',
))
# Columns shown when the user does not select any.
_ASSIGNMENT_LIST_DEFAULT_COLUMNS = (
    'token-name',
    'pod-id',
    'name',
    'value',
)
# No custom stringifiers (cell values are presumably plain strings --
# verify against the assignment schema).
_ASSIGNMENT_LIST_STRINGIFIERS = {}
# Sanity checks: defaults and stringifiers must reference known columns.
ASSERT.issuperset(_ASSIGNMENT_LIST_COLUMNS, _ASSIGNMENT_LIST_DEFAULT_COLUMNS)
ASSERT.issuperset(_ASSIGNMENT_LIST_COLUMNS, _ASSIGNMENT_LIST_STRINGIFIERS)
@argparses.begin_parser(
    'list-assignments', **argparses.make_help_kwargs('list token assignments')
)
@columns_argparses.columnar_arguments(
    _ASSIGNMENT_LIST_COLUMNS, _ASSIGNMENT_LIST_DEFAULT_COLUMNS
)
@argparses.end
def cmd_list_assignments(args):
    """Print every token assignment as a sorted columnar table."""
    columnar = columns.Columnar(
        **columns_argparses.make_columnar_kwargs(args),
        stringifiers=_ASSIGNMENT_LIST_STRINGIFIERS,
    )
    database = tokens.make_tokens_database().get()
    for token_name, assignments in database.assignments.items():
        for assignment in assignments:
            columnar.append({
                'token-name': token_name,
                'pod-id': assignment.pod_id,
                'name': assignment.name,
                'value': assignment.value,
            })

    def sort_key(row):
        return (row['token-name'], row['pod-id'], row['name'], row['value'])

    columnar.sort(sort_key)
    columnar.output(sys.stdout)
    return 0
@argparses.begin_parser(
    'define', **argparses.make_help_kwargs('define a token')
)
@argparses.begin_mutually_exclusive_group(required=True)
@argparses.argument(
    '--range',
    metavar=('LOWER', 'UPPER'),
    nargs=2,
    help='provide range of tokens',
)
@argparses.argument(
    '--value',
    action='append',
    help='add token value',
)
@argparses.end
@argparses.argument(
    'token_name',
    type=models.validate_token_name,
    help='provide name of token',
)
@argparses.end
def cmd_define(args):
    """Create a token definition, or replace it if it already exists."""
    oses.assert_root_privilege()
    if args.range:
        lower, upper = args.range
        definition = tokens.Tokens.Definition(
            kind='range',
            args=[int(lower), int(upper)],
        )
    else:
        definition = tokens.Tokens.Definition(
            kind='values',
            args=ASSERT.not_none(args.value),
        )
    with tokens.make_tokens_database().writing() as active_tokens:
        # Update in place when the token is already defined.
        writer = (
            active_tokens.update_definition
            if active_tokens.has_definition(args.token_name) else
            active_tokens.add_definition
        )
        writer(args.token_name, definition)
    return 0
@argparses.begin_parser(
    'undefine', **argparses.make_help_kwargs('undefine a token')
)
@argparses.argument(
    'token_name',
    type=models.validate_token_name,
    help='provide name of token',
)
@argparses.end
def cmd_undefine(args):
    """Remove a token definition; refuse while live pods still hold it."""
    oses.assert_root_privilege()
    live_pod_ids = tokens.load_active_pod_ids(pod_ops_dirs.make_ops_dirs())
    with tokens.make_tokens_database().writing() as active_tokens:
        if not active_tokens.has_definition(args.token_name):
            LOG.info('skip: tokens undefine: %s', args.token_name)
            return 0
        LOG.info('tokens undefine: %s', args.token_name)
        # No live pod may still be assigned this token.
        ASSERT.isdisjoint(
            live_pod_ids, active_tokens.iter_pod_ids(args.token_name)
        )
        active_tokens.remove_definition(args.token_name)
    return 0
@argparses.begin_parser(
    'assign', **argparses.make_help_kwargs('assign a token to a pod')
)
@argparses.argument(
    'token_name',
    type=models.validate_token_name,
    help='provide name of token',
)
@argparses.argument(
    'pod_id',
    type=ctr_models.validate_pod_id,
    help='provide pod id',
)
@argparses.argument(
    'name',
    help='provide assignment name',
)
@argparses.argument(
    '--value',
    help='select token value (default: the next available one)',
)
@argparses.end
def cmd_assign(args):
    """Assign a token value to an active pod (requires root)."""
    oses.assert_root_privilege()
    # The target pod must currently be active.
    ASSERT.in_(
        args.pod_id,
        tokens.load_active_pod_ids(pod_ops_dirs.make_ops_dirs()),
    )
    with tokens.make_tokens_database().writing() as active_tokens:
        ASSERT.predicate(args.token_name, active_tokens.has_definition)
        # args.value may be None; per the --value help text, the next
        # available value is then selected.
        active_tokens.assign(
            args.token_name, args.pod_id, args.name, args.value
        )
    return 0
@argparses.begin_parser(
    'unassign', **argparses.make_help_kwargs('unassign a token from a pod')
)
@argparses.argument(
    'token_name',
    type=models.validate_token_name,
    help='provide name of token',
)
@argparses.argument(
    'pod_id',
    type=ctr_models.validate_pod_id,
    help='provide pod id',
)
@argparses.argument(
    'name',
    help='provide assignment name',
)
@argparses.end
def cmd_unassign(args):
    """Release a token assignment from a removed pod (requires root)."""
    oses.assert_root_privilege()
    # You can only unassign a token from a removed pod (in this case, we
    # treat the host as removed).
    active_pod_ids = tokens.load_active_pod_ids(pod_ops_dirs.make_ops_dirs())
    active_pod_ids.remove(ctr_models.read_host_pod_id())
    ASSERT.not_in(args.pod_id, active_pod_ids)
    with tokens.make_tokens_database().writing() as active_tokens:
        ASSERT.predicate(args.token_name, active_tokens.has_definition)
        active_tokens.unassign(args.token_name, args.pod_id, args.name)
    return 0
@argparses.begin_parser(
    'tokens', **argparses.make_help_kwargs('manage tokens')
)
@argparses.begin_subparsers_for_subcmds(dest='command')
@argparses.include(cmd_list_definitions)
@argparses.include(cmd_list_assignments)
@argparses.include(cmd_define)
@argparses.include(cmd_undefine)
@argparses.include(cmd_assign)
@argparses.include(cmd_unassign)
@argparses.end
@argparses.end
def main(args):
    """Dispatch the selected `tokens` subcommand to its handler."""
    if args.command == 'list-definitions':
        return cmd_list_definitions(args)
    elif args.command == 'list-assignments':
        return cmd_list_assignments(args)
    elif args.command == 'define':
        return cmd_define(args)
    elif args.command == 'undefine':
        return cmd_undefine(args)
    elif args.command == 'assign':
        return cmd_assign(args)
    elif args.command == 'unassign':
        return cmd_unassign(args)
    else:
        # argparse should make this unreachable.
        return ASSERT.unreachable('unknown command: {}', args.command)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/token_cmds.py",
"copies": "1",
"size": "7952",
"license": "mit",
"hash": 2057044704145725700,
"line_mean": 27.6043165468,
"line_max": 78,
"alpha_frac": 0.6506539235,
"autogenerated": false,
"ratio": 3.345393352965923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4496047276465923,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'main',
]
import sys
from g1.bases import argparses
from g1.bases import oses
from g1.bases.assertions import ASSERT
from g1.texts import columns
from g1.texts.columns import argparses as columns_argparses
from . import envs
from . import models
# Columns available to the `envs list` command.
_ENV_LIST_COLUMNS = frozenset((
    'name',
    'value',
))
# Columns shown when the user does not explicitly select any.
_ENV_LIST_DEFAULT_COLUMNS = (
    'name',
    'value',
)
# Sanity-check at import time: defaults must be a subset of the full set.
ASSERT.issuperset(_ENV_LIST_COLUMNS, _ENV_LIST_DEFAULT_COLUMNS)
@argparses.begin_parser(
    'list',
    **argparses.make_help_kwargs('list environment variables'),
)
@columns_argparses.columnar_arguments(
    _ENV_LIST_COLUMNS, _ENV_LIST_DEFAULT_COLUMNS
)
@argparses.end
def cmd_list(args):
    """List the operations environment variables in columnar output."""
    columnar = columns.Columnar(**columns_argparses.make_columnar_kwargs(args))
    for name, value in envs.load().items():
        columnar.append({'name': name, 'value': value})
    columnar.output(sys.stdout)
    return 0
@argparses.begin_parser(
    'set',
    **argparses.make_help_kwargs('create or update an environment variable'),
)
@argparses.argument(
    'name',
    type=models.validate_env_name,
    help='environment variable name',
)
@argparses.argument(
    'value',
    # TODO: What restriction should we put on the value string format?
    help='environment variable value',
)
@argparses.end
def cmd_set(args):
    """Create or update one environment variable (requires root)."""
    oses.assert_root_privilege()
    # Read-modify-write of the whole environment store.
    env_dict = envs.load()
    env_dict[args.name] = args.value
    envs.save(env_dict)
    return 0
@argparses.begin_parser(
    'remove',
    **argparses.make_help_kwargs('remove an environment variable'),
)
@argparses.argument(
    'name',
    type=models.validate_env_name,
    help='environment variable name',
)
@argparses.end
def cmd_remove(args):
    """Remove an environment variable if present (requires root)."""
    oses.assert_root_privilege()
    env_dict = envs.load()
    # Only rewrite the store when the variable actually existed.
    if env_dict.pop(args.name, None) is not None:
        envs.save(env_dict)
    return 0
@argparses.begin_parser(
    'envs', **argparses.make_help_kwargs('manage environment variables')
)
@argparses.begin_subparsers_for_subcmds(dest='command')
@argparses.include(cmd_list)
@argparses.include(cmd_set)
@argparses.include(cmd_remove)
@argparses.end
@argparses.end
def main(args):
    """Dispatch the selected `envs` subcommand to its handler."""
    if args.command == 'list':
        return cmd_list(args)
    elif args.command == 'set':
        return cmd_set(args)
    elif args.command == 'remove':
        return cmd_remove(args)
    else:
        # argparse should make this unreachable.
        return ASSERT.unreachable('unknown command: {}', args.command)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/env_cmds.py",
"copies": "1",
"size": "2411",
"license": "mit",
"hash": -167832319378670980,
"line_mean": 22.8712871287,
"line_max": 79,
"alpha_frac": 0.6831190377,
"autogenerated": false,
"ratio": 3.1311688311688313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43142878688688313,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'main',
'run',
]
from startup import startup
import g1.scripts.parts
from g1.apps import bases as apps_bases
from g1.bases import argparses
from g1.bases.assertions import ASSERT
from . import alert_cmds
from . import env_cmds
from . import pod_cmds
from . import repo_cmds
from . import token_cmds
from . import xar_cmds
@argparses.begin_subparsers_for_subcmds(dest='subject')
@argparses.include(repo_cmds.main)
@argparses.include(alert_cmds.main)
@argparses.include(pod_cmds.main)
@argparses.include(xar_cmds.main)
@argparses.include(env_cmds.main)
@argparses.include(token_cmds.main)
@argparses.end
def main(args: apps_bases.LABELS.args, _: g1.scripts.parts.LABELS.setup):
    """Operations tool."""
    # Dispatch to the sub-tool selected by the first CLI argument.
    if args.subject == 'repos':
        return repo_cmds.main(args)
    elif args.subject == 'alerts':
        return alert_cmds.main(args)
    elif args.subject == 'pods':
        return pod_cmds.main(args)
    elif args.subject == 'xars':
        return xar_cmds.main(args)
    elif args.subject == 'envs':
        return env_cmds.main(args)
    elif args.subject == 'tokens':
        return token_cmds.main(args)
    else:
        # argparse should make this unreachable.
        return ASSERT.unreachable('unknown subject: {}', args.subject)
def add_arguments(parser: apps_bases.LABELS.parser) -> apps_bases.LABELS.parse:
    # Populate the parser from main's @argparses decorators at startup.
    argparses.make_argument_parser(main, parser=parser)
def run():
    """Entry point of the `ops` command-line tool."""
    startup(add_arguments)
    apps_bases.run(main, prog='ops')
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/cores/g1/operations/cores/apps.py",
"copies": "1",
"size": "1440",
"license": "mit",
"hash": 3158717034613512700,
"line_mean": 26.1698113208,
"line_max": 79,
"alpha_frac": 0.6930555556,
"autogenerated": false,
"ratio": 3.123644251626898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9316699807226898,
"avg_score": 0,
"num_lines": 53
} |
__all__ = [
'main',
'run',
]
import json
import sys
from pathlib import Path
from startup import startup
import g1.scripts.parts
from g1.apps import bases
from g1.bases import argparses
from g1.bases.assertions import ASSERT
from . import build
from . import cleanup
from . import repos
@argparses.begin_parser(
    'init',
    **argparses.make_help_kwargs('initialize release repository'),
)
@argparses.end
def cmd_init(args):
    """Create the release repository's directory layout."""
    # Initialize every artifact directory type under the repo root.
    # NOTE(review): presumably idempotent — verify against repos.*.init.
    repos.EnvsDir.init(args.release_repo)
    repos.PodDir.init(args.release_repo)
    repos.XarDir.init(args.release_repo)
    repos.BuilderImageDir.init(args.release_repo)
    repos.ImageDir.init(args.release_repo)
    repos.VolumeDir.init(args.release_repo)
    return 0
@argparses.begin_parser(
    'list',
    **argparses.make_help_kwargs('list build artifacts'),
)
@argparses.end
def cmd_list(args):
    """Dump the repository's build artifacts as JSON to stdout."""
    envs_dir = repos.EnvsDir(args.release_repo)
    # Per-environment pod/xar release versions, keyed by label.
    data = {
        'pod-releases': {
            env: {
                str(pod_dir.label): pod_dir.version
                for pod_dir in envs_dir.sort_pod_dirs(env)
            }
            for env in envs_dir.envs
        },
        'xar-releases': {
            env: {
                str(xar_dir.label): xar_dir.version
                for xar_dir in envs_dir.sort_xar_dirs(env)
            }
            for env in envs_dir.envs
        },
    }
    # All versions of every artifact type, keyed by label.
    for name, cls in (
        ('pods', repos.PodDir),
        ('xars', repos.XarDir),
        ('builder-images', repos.BuilderImageDir),
        ('images', repos.ImageDir),
        ('volumes', repos.VolumeDir),
    ):
        groups = cls.group_dirs(args.release_repo)
        data[name] = {
            str(label): [obj.version for obj in dir_objects]
            for label, dir_objects in groups.items()
        }
    json.dump(data, sys.stdout, indent=4)
    sys.stdout.write('\n')
    return 0
@argparses.argument(
    '--release-repo',
    type=Path,
    required=True,
    help='provide host path to release repository',
)
@argparses.begin_subparsers_for_subcmds(dest='command')
@argparses.include(cmd_init)
@argparses.include(cmd_list)
@argparses.include(build.cmd_build)
@argparses.include(build.cmd_release)
@argparses.include(build.cmd_unrelease)
@argparses.include(build.cmd_remove)
@argparses.include(cleanup.cmd_cleanup)
@argparses.end
def main(
    args: bases.LABELS.args,
    _: g1.scripts.parts.LABELS.setup,
):
    """Release process manager."""
    # Dispatch the selected subcommand.
    if args.command == 'init':
        return cmd_init(args)
    elif args.command == 'list':
        return cmd_list(args)
    elif args.command == 'build':
        return build.cmd_build(args)
    elif args.command == 'release':
        return build.cmd_release(args)
    elif args.command == 'unrelease':
        return build.cmd_unrelease(args)
    elif args.command == 'remove':
        return build.cmd_remove(args)
    elif args.command == 'cleanup':
        return cleanup.cmd_cleanup(args)
    else:
        # NOTE(review): unlike the sibling dispatchers, this branch does
        # not return the assertion's result; it falls through to return 0.
        ASSERT.unreachable('unknown command: {}', args.command)
    return 0
def add_arguments(parser: bases.LABELS.parser) -> bases.LABELS.parse:
    # Populate the parser from main's @argparses decorators at startup.
    argparses.make_argument_parser(main, parser=parser)
def run():
    """Entry point of the `release` command-line tool."""
    startup(add_arguments)
    bases.run(main, prog='release')
| {
"repo_name": "clchiou/garage",
"path": "shipyard2/shipyard2/releases/apps.py",
"copies": "1",
"size": "3210",
"license": "mit",
"hash": -9076809681554804000,
"line_mean": 25.3114754098,
"line_max": 69,
"alpha_frac": 0.6320872274,
"autogenerated": false,
"ratio": 3.3092783505154637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44413655779154637,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'make_bdist_zipapp',
]
import distutils.core
import distutils.errors
import distutils.log
import stat
import tempfile
import zipfile
from pathlib import Path
def make_bdist_zipapp(
    *, python='/usr/bin/env python3', main_optional=False, main=None
):
    """Make a distutils command class that builds a zipapp.

    Args:
        python: interpreter written into the zipapp's shebang line.
        main_optional: when true, the --main option may be omitted.
        main: default for --main, in 'module:function' form.

    Returns:
        A ``bdist_zipapp`` command class to register with distutils.
    """
    # pylint: disable=too-many-statements
    class bdist_zipapp(distutils.core.Command):
        PYTHON = python
        MAIN = main
        # __main__.py generated when a main function is configured.
        MAIN_TEMPLATE = (
            '# -*- coding: utf-8 -*-\n'
            'import {module}\n'
            '{module}.{func}()\n'
        )
        description = "create a zipapp distribution"
        user_options = [
            ('python=', None, "python interpreter to use"),
            ('main=', None, "main function of the zipapp"),
            ('output=', None, "output zipapp path"),
        ]
        def __init__(self, dist):
            super().__init__(dist)
            self.python = self.PYTHON
            self.main = self.MAIN
            self.output = None
        def initialize_options(self):
            self.python = self.PYTHON
            self.main = self.MAIN
            self.output = None
        def finalize_options(self):
            # Validate options; --output is always required.
            if self.python is None:
                raise distutils.errors.DistutilsOptionError(
                    '--python is required'
                )
            if not main_optional and self.main is None:
                raise distutils.errors.DistutilsOptionError(
                    '--main is required'
                )
            if self.output is None:
                raise distutils.errors.DistutilsOptionError(
                    '--output is required'
                )
            self.output = Path(self.output)
        def run(self):
            if self.distribution.has_ext_modules():
                raise distutils.errors.DistutilsPlatformError(
                    'disallow making zipapp with ext module for now'
                )
            # Stage the install into a sibling temp directory that is
            # removed when the build finishes.
            with tempfile.TemporaryDirectory(
                dir=self.output.parent,
                prefix=self.output.name + '-',
            ) as build_dir:
                self._run(build_dir)
        def _run(self, build_dir):
            self.run_command('build')
            distutils.log.info('installing to %s' % build_dir)
            install = self.reinitialize_command(
                'install',
                reinit_subcommands=1,
            )
            install.root = build_dir
            # Install lib and data but ignore headers, scripts, and egg
            # info at the moment.
            if self.distribution.has_pure_modules():
                self.run_command('install_lib')
            if self.distribution.has_data_files():
                self.run_command('install_data')
            install_lib = self.distribution.get_command_obj('install_lib')
            install_dir = Path(install_lib.install_dir)
            if self.main is not None:
                # Generate __main__.py from the 'module:function' spec.
                main_path = install_dir / '__main__.py'
                module, func = self.main.rsplit(':', maxsplit=1)
                distutils.log.info('generate: %s' % main_path)
                with open(main_path, 'w') as main_file:
                    main_file.write(
                        self.MAIN_TEMPLATE.format(
                            module=module,
                            func=func,
                        )
                    )
            def open_zip_archive(file, mode):
                # It seems that Python interpreter can only load
                # DEFLATE-compressed zip file.
                return zipfile.ZipFile(
                    file,
                    mode=mode,
                    compression=zipfile.ZIP_DEFLATED,
                )
            def add_content(zip_archive):
                # Archive everything installed, with paths relative to
                # the install root.
                for child in install_dir.rglob('*'):
                    arcname = child.relative_to(install_dir)
                    # TODO: This might create duplicated entries (for
                    # example, multiple "g1/" directories). We probably
                    # should fix this?
                    zip_archive.write(str(child), str(arcname))
            if self.output.exists():
                # Append into an existing zipapp (its shebang is kept).
                distutils.log.info('append to: %s' % self.output)
                with open_zip_archive(self.output, 'a') as zip_archive:
                    add_content(zip_archive)
            else:
                distutils.log.info('generate: %s' % self.output)
                with open(self.output, 'wb') as output_file:
                    output_file.write(b'#!%s\n' % self.python.encode('utf-8'))
                    # Call flush() to ensure that zip content is after
                    # shebang.
                    output_file.flush()
                    with open_zip_archive(output_file, 'w') as zip_archive:
                        add_content(zip_archive)
            # Do `chmod a+x`.
            mode = self.output.stat().st_mode
            self.output.chmod(stat.S_IMODE(mode) | 0o111)
    return bdist_zipapp
| {
"repo_name": "clchiou/garage",
"path": "py/g1/devtools/buildtools/g1/devtools/buildtools/zipapps.py",
"copies": "1",
"size": "4968",
"license": "mit",
"hash": -7487802471318898000,
"line_mean": 33.985915493,
"line_max": 78,
"alpha_frac": 0.501610306,
"autogenerated": false,
"ratio": 4.516363636363637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5517973942363636,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'make_compile_schemas',
]
import subprocess
import warnings
from distutils import log
from distutils.core import Command
from pathlib import Path
def make_compile_schemas(schemas, *, import_paths=()):
    """Make a distutils command class that compiles Cap'n Proto schemas.

    ``schemas`` maps absolute schema imports to output file paths;
    ``import_paths`` is the default schema search path.
    """
    class compile_schemas(Command):
        # Default search path, colon-joined; overridable per invocation
        # via --import-path.
        IMPORT_PATH = ':'.join(map(str, import_paths))
        description = "compile Cap'n Proto schema files"
        user_options = [
            ('import-path=', None, 'schema file search path'),
        ]
        def initialize_options(self):
            self.import_path = self.IMPORT_PATH
        def finalize_options(self):
            pass
        def run(self):
            _compile_schemas(schemas, self.import_path.split(':'))
    return compile_schemas
def _compile_schemas(schemas, import_paths):
    """Compile every schema to its output path.

    Args:
        schemas: mapping of absolute schema imports (e.g.
            '/foo/bar.capnp') to output file paths.
        import_paths: directories searched for the schema files.
    """
    schema_paths = _find_schema_paths(schemas, import_paths)
    for import_, output_path in sorted(schemas.items()):
        output_path = Path(output_path).absolute()
        # Create the output directory in-process instead of shelling out
        # to ``mkdir --parents``; exist_ok also removes the check-then-
        # create race the old is_dir() test had.
        output_path.parent.mkdir(parents=True, exist_ok=True)
        _compile(schema_paths[import_], import_paths, output_path)
def _is_absolute_import(import_):
return import_.startswith('/')
def _find_schema_paths(imports, import_paths):
    """Map each absolute import to the schema file it resolves to."""
    # Validate up front: every import must be absolute.
    for import_ in imports:
        if not _is_absolute_import(import_):
            raise ValueError('all input must be absolute: %s' % import_)
    roots = [Path(path).absolute() for path in import_paths]
    for root in roots:
        if not root.is_dir():
            warnings.warn('not a directory: %s' % root)
    # Resolve each distinct import exactly once, preserving order.
    return {
        import_: _find_import_path(roots, import_)
        for import_ in dict.fromkeys(imports)
    }
def _find_import_path(import_paths, import_):
    """Return the unique schema file that ``import_`` resolves to.

    Raises FileNotFoundError when no search path contains the schema,
    and RuntimeError when more than one does (ambiguous import).
    """
    assert _is_absolute_import(import_)
    found = [
        candidate
        for candidate in (
            _make_schema_path(root, import_) for root in import_paths
        )
        if candidate.is_file()
    ]
    if not found:
        raise FileNotFoundError('no import path for %r' % import_)
    if len(found) > 1:
        raise RuntimeError(
            'find multiple import paths for %r: %s' % (import_, found)
        )
    return found[0]
def _make_schema_path(import_path, import_):
# import_ must be an absolute path.
assert import_[0] == '/' and import_[1] != '/', import_
return import_path / import_[1:]
def _compile(schema_path, import_paths, output_path):
    """Run ``capnp compile`` and capture its stdout into output_path."""
    # '-o-' makes capnp write the compiled output to stdout.
    cmd = ['capnp', 'compile', '-o-']
    cmd.extend(
        '--import-path=%s' % Path(path).absolute() for path in import_paths
    )
    cmd.append(str(schema_path))
    log.info('execute: %s > %s', ' '.join(cmd), output_path)
    with output_path.open('wb') as output:
        subprocess.run(cmd, stdout=output, check=True)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/devtools/buildtools/g1/devtools/buildtools/capnps.py",
"copies": "1",
"size": "3044",
"license": "mit",
"hash": 3416143251514324500,
"line_mean": 29.1386138614,
"line_max": 76,
"alpha_frac": 0.6199080158,
"autogenerated": false,
"ratio": 3.758024691358025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9877932707158025,
"avg_score": 0,
"num_lines": 101
} |
__all__ = [
'make_compile_schemas',
]
import warnings
from distutils import log
from distutils.core import Command
from pathlib import Path
from subprocess import check_call
def make_compile_schemas(schemas, *, import_paths=()):
    """Make a distutils command class that compiles Cap'n Proto schemas.

    ``schemas`` maps absolute schema imports to output file paths;
    ``import_paths`` is the default schema search path.
    """
    class compile_schemas(Command):
        # Default search path, colon-joined; overridable per invocation
        # via --import-path.
        IMPORT_PATH = ':'.join(map(str, import_paths))
        description = "compile Cap'n Proto schema files"
        user_options = [
            ('import-path=', None, 'schema file search path'),
        ]
        def initialize_options(self):
            self.import_path = self.IMPORT_PATH
        def finalize_options(self):
            pass
        def run(self):
            _compile_schemas(schemas, self.import_path.split(':'))
    return compile_schemas
def _compile_schemas(schemas, import_paths):
    """Compile every schema to its output path.

    Args:
        schemas: mapping of absolute schema imports (e.g.
            '/foo/bar.capnp') to output file paths.
        import_paths: directories searched for the schema files.
    """
    schema_paths = _find_schema_paths(schemas, import_paths)
    for import_, output_path in sorted(schemas.items()):
        output_path = Path(output_path).absolute()
        # Create the output directory in-process instead of shelling out
        # to ``mkdir --parents``; exist_ok also removes the check-then-
        # create race the old is_dir() test had.
        output_path.parent.mkdir(parents=True, exist_ok=True)
        _compile(schema_paths[import_], import_paths, output_path)
def _is_absolute_import(import_):
return import_.startswith('/')
def _find_schema_paths(imports, import_paths):
    """Find all imported Cap'n Proto schema files."""
    # Reject relative imports up front.
    for import_ in imports:
        if not _is_absolute_import(import_):
            raise ValueError('all input must be absolute: %s' % import_)
    import_paths = [Path(p).absolute() for p in import_paths]
    for import_path in import_paths:
        if not import_path.is_dir():
            warnings.warn('not a directory: %s' % import_path)
    # Resolve each unique import once.
    schema_paths = {}
    for import_ in imports:
        if import_ not in schema_paths:
            schema_paths[import_] = _find_import_path(import_paths, import_)
    return schema_paths
def _find_import_path(import_paths, import_):
    """Return the unique schema file that ``import_`` resolves to.

    Raises FileNotFoundError when no search path contains the schema,
    and RuntimeError when more than one does (ambiguous import).
    """
    assert _is_absolute_import(import_)
    found = []
    for import_path in import_paths:
        schema_path = _make_schema_path(import_path, import_)
        if schema_path.is_file():
            found.append(schema_path)
    if not found:
        raise FileNotFoundError('no import path for %r' % import_)
    if len(found) > 1:
        raise RuntimeError(
            'find multiple import paths for %r: %s' % (import_, found))
    return found[0]
def _make_schema_path(import_path, import_):
# import_ must be an absolute path.
assert import_[0] == '/' and import_[1] != '/', import_
return import_path / import_[1:]
def _compile(schema_path, import_paths, output_path):
    """Compile the schema."""
    # '-o-' makes capnp write the compiled output to stdout, which is
    # redirected into output_path.
    cmd = ['capnp', 'compile', '-o-']
    for import_path in import_paths:
        cmd.append('--import-path=%s' % Path(import_path).absolute())
    cmd.append(str(schema_path))
    log.info('execute: %s > %s', ' '.join(cmd), output_path)
    with output_path.open('wb') as output:
        check_call(cmd, stdout=output)
| {
"repo_name": "clchiou/garage",
"path": "py/buildtools/buildtools/capnp.py",
"copies": "1",
"size": "2997",
"license": "mit",
"hash": -8321407284662016000,
"line_mean": 29.2727272727,
"line_max": 76,
"alpha_frac": 0.6232899566,
"autogenerated": false,
"ratio": 3.755639097744361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9878929054344361,
"avg_score": 0,
"num_lines": 99
} |
__all__ = [
'make_json_formatter',
'make_yaml_formatter',
]
import datetime
import enum
from collections import Mapping
from collections import OrderedDict
from functools import partial
def make_json_formatter():
    """Make a JSON formatter function.

    Imports are deferred so this module does not pull in the
    JSON-specific helpers unless this formatter is actually used.
    """
    import json
    from .json import encode_datetime
    from .json import encode_mapping
    from .json import join_encoders
    encoder = join_encoders(
        # Order by frequency (a very small optimization)
        encode_mapping,
        encode_datetime,
    )
    return partial(
        json.dumps,
        ensure_ascii=False,
        indent=4,
        default=encoder,
    )
def make_yaml_formatter():
    """Make a YAML formatter function.

    NOTE(review): this depends on the module-level
    ``from collections import Mapping`` import above, which stopped
    working in Python 3.10 (``Mapping`` moved to ``collections.abc``);
    confirm the target interpreter version.
    """
    import yaml
    from .yaml import represent_datetime
    from .yaml import represent_enum
    from .yaml import represent_mapping
    # Registration mutates global SafeDumper state; repeated calls are
    # redundant but harmless.
    yaml.SafeDumper.add_representer(datetime.datetime, represent_datetime)
    yaml.SafeDumper.add_multi_representer(enum.Enum, represent_enum)
    yaml.SafeDumper.add_multi_representer(Mapping, represent_mapping)
    # We need this because OrderedDict is not "strictly" a Mapping.
    yaml.SafeDumper.add_representer(OrderedDict, represent_mapping)
    return partial(
        yaml.safe_dump,
        default_flow_style=False,
        allow_unicode=True,
    )
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/formatters/__init__.py",
"copies": "1",
"size": "1234",
"license": "mit",
"hash": 457118538008033860,
"line_mean": 26.4222222222,
"line_max": 74,
"alpha_frac": 0.6936790924,
"autogenerated": false,
"ratio": 4.127090301003345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5320769393403345,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'make_read_write_lock',
]
import threading
from g1.bases.assertions import ASSERT
def make_read_write_lock():
    """Create a readers-writer lock pair.

    Returns a ``(reader_lock, writer_lock)`` tuple of lock-like context
    managers that share one underlying ReadWriteLock.
    """
    shared = ReadWriteLock()
    reader = LockLike(shared.reader_acquire, shared.reader_release)
    writer = LockLike(shared.writer_acquire, shared.writer_release)
    return reader, writer
class LockLike:
    """Adapt an (acquire, release) callable pair to the lock protocol.

    The callables are exposed as attributes so callers may also invoke
    ``acquire``/``release`` explicitly (e.g. to pass a timeout).
    """

    def __init__(self, acquire, release):
        self.acquire = acquire
        self.release = release

    def __enter__(self):
        self.acquire()
        return None

    def __exit__(self, *_):
        self.release()
        return None
class ReadWriteLock:
    """Readers-writer lock.
    The writer part of the lock is pretty much like an ordinary lock,
    but the readers part of the lock, at the current implementation, is
    somehow like a reentrant lock (the same thread may acquire a reader
    lock multiple times).
    NOTE: stdlib's Lock.acquire takes both blocking and timeout
    arguments even though just timeout is sufficient in all use cases.
    I think the blocking argument is there just to maintain backward
    compatibility. stdlib's Lock.acquire's interface is complicated
    because of this; so I would prefer omitting blocking argument,
    breaking compatibility with stdlib.
    """
    def __init__(self):
        # Both conditions share one lock guarding the two counters.
        self._lock = threading.Lock()
        # Readers wait on this until there is no active writer.
        self._reader_cond = threading.Condition(self._lock)
        self._num_readers = 0
        # Writers wait on this until there are no readers and no writer.
        self._writer_cond = threading.Condition(self._lock)
        self._num_writers = 0
    def reader_acquire(self, *, timeout=None):
        # Returns False on timeout, True once the reader lock is held.
        with self._lock:
            if not self._reader_cond.wait_for(
                lambda: self._num_writers == 0,
                timeout=timeout,
            ):
                return False
            self._num_readers += 1
            return True
    def reader_release(self):
        with self._lock:
            ASSERT.greater(self._num_readers, 0)
            ASSERT.equal(self._num_writers, 0)
            self._num_readers -= 1
            # Wake one waiting writer when the last reader leaves.
            if self._num_readers == 0:
                self._writer_cond.notify()
    def writer_acquire(self, *, timeout=None):
        # Returns False on timeout, True once the writer lock is held.
        with self._lock:
            if not self._writer_cond.wait_for(
                lambda: self._num_readers == 0 and self._num_writers == 0,
                timeout=timeout,
            ):
                return False
            self._num_writers += 1
            return True
    def writer_release(self):
        with self._lock:
            ASSERT.equal(self._num_readers, 0)
            ASSERT.equal(self._num_writers, 1)
            self._num_writers = 0
            # Wake all waiting readers and one waiting writer.
            self._reader_cond.notify_all()
            self._writer_cond.notify()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/threads/g1/threads/locks.py",
"copies": "1",
"size": "2624",
"license": "mit",
"hash": -1552650514924904400,
"line_mean": 28.8181818182,
"line_max": 74,
"alpha_frac": 0.5910823171,
"autogenerated": false,
"ratio": 4.185007974481659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 88
} |
__all__ = [
'make_server_socket',
'make_ssl_context',
]
import socket
import ssl
from g1.asyncs.bases import adapters
def make_server_socket(
    address,
    *,
    family=socket.AF_INET,
    backlog=128,
    reuse_address=False,
    reuse_port=False,
    ssl_context=None,
):
    """Create a bound, listening stream socket for a server.

    When ``ssl_context`` is given, the socket is wrapped for
    server-side TLS.  Returns an ``adapters.SocketAdapter`` around the
    resulting socket.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, reuse_address)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, reuse_port)
        sock.bind(address)
        sock.listen(backlog)
        if ssl_context:
            sock = ssl_context.wrap_socket(sock, server_side=True)
    except Exception:
        # Close on any setup failure so the descriptor does not leak.
        sock.close()
        raise
    return adapters.SocketAdapter(sock)
def make_ssl_context(
    certificate,
    private_key,
    client_authentication=False,
    protocols=(),
):
    """Build a server-side SSLContext.

    Returns None when either the certificate or the private key is not
    provided (TLS disabled).
    """
    if not (certificate and private_key):
        return None
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(certificate, private_key)
    if client_authentication:
        # Require client certificates, verified against our own cert.
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(cafile=certificate)
    if protocols:
        # Advertise application protocols via ALPN and/or NPN when the
        # ssl build supports them.
        if ssl.HAS_ALPN:
            context.set_alpn_protocols(protocols)
        if ssl.HAS_NPN:
            context.set_npn_protocols(protocols)
    return context
| {
"repo_name": "clchiou/garage",
"path": "py/g1/networks/servers/g1/networks/servers/sockets.py",
"copies": "1",
"size": "1395",
"license": "mit",
"hash": 3296465647869094000,
"line_mean": 25.320754717,
"line_max": 78,
"alpha_frac": 0.6566308244,
"autogenerated": false,
"ratio": 3.549618320610687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4706249145010687,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'make_server_socket',
'serve',
'synchronous',
]
from functools import wraps
from curio import socket
import curio
from garage import asyncs
from garage.assertions import ASSERT
def make_server_socket(
        address, *,
        family=socket.AF_INET,
        backlog=128,
        reuse_address=True,
        reuse_port=False):
    """Create a bound, listening curio stream socket for a server."""
    sock = socket.socket(family, socket.SOCK_STREAM)
    try:
        if reuse_address:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        if reuse_port:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, True)
        sock.bind(address)
        sock.listen(backlog)
    except Exception:
        # XXX: I would prefer a non-async make_server_socket and that
        # forbids me calling sock.close() here because it's async; so I
        # have to call the underlying socket object's close() directly.
        # Since no one else is referencing to this sock object, this
        # hack should be fine.
        sock._socket.close()
        raise
    else:
        return sock
async def serve(graceful_exit, make_server_socket, handle_client, *,
                make_ssl_context=None,
                logger=None):
    """Accept clients and run ``handle_client`` for each connection.

    Runs until ``graceful_exit`` is set (then cancels the acceptor,
    closes client sockets, and drains the handlers), or returns/raises
    early if an internal task terminates unexpectedly.
    """
    if logger is None:
        import logging
        logger = logging.getLogger(__name__)
    # Map handler task -> client socket so sockets can be closed on a
    # graceful exit.
    connections = {}
    async def accept_clients(handlers):
        # Accept loop: spawn one handler task per client connection.
        async with make_server_socket() as server_socket:
            if make_ssl_context:
                server_socket = make_ssl_context().wrap_socket(
                    server_socket, server_side=True)
            while True:
                sock, addr = await server_socket.accept()
                logger.debug('serve client from: %r', addr)
                handler = await handlers.spawn(handle_client(sock, addr))
                connections[handler] = sock
    async def join_client_handlers(handlers):
        # Reap finished handler tasks and log their errors.
        async for handler in handlers:
            connections.pop(handler, None)
            if handler.exception:
                logger.error(
                    'err in client handler: %r',
                    handler, exc_info=handler.exception,
                )
    async with asyncs.TaskSet() as handlers, asyncs.TaskStack() as stack:
        joiner = await stack.spawn(join_client_handlers(handlers))
        acceptor = await stack.spawn(accept_clients(handlers))
        await stack.spawn(graceful_exit.wait())
        # Wait for whichever finishes first: joiner, acceptor, or the
        # graceful-exit waiter.
        task = await stack.wait_any()
        if task in (joiner, acceptor):
            # Internal tasks should not finish on their own; propagate
            # their result or exception.
            logger.error('server task is terminated: %r', task)
            return await task.join()
        ASSERT.true(graceful_exit.is_set())
        logger.info('initiate graceful exit')
        await acceptor.cancel()
        handlers.graceful_exit()
        # If it's not a graceful exit, the tasks will be cancelled; so
        # we don't need to close sockets on that case, right?
        for conn in connections.values():
            await asyncs.close_socket_and_wakeup_task(conn)
        await joiner.join()
def synchronous(coro_func):
    """Transform the decorated coroutine function into a synchronous
    function.

    The returned callable has the same signature and runs the coroutine
    to completion with ``curio.run``.
    """
    @wraps(coro_func)
    def run_synchronously(*args, **kwargs):
        coro = coro_func(*args, **kwargs)
        return curio.run(coro)
    return run_synchronously
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/asyncs/utils.py",
"copies": "1",
"size": "3267",
"license": "mit",
"hash": -6377752611726776000,
"line_mean": 30.7184466019,
"line_max": 73,
"alpha_frac": 0.6036118763,
"autogenerated": false,
"ratio": 4.114609571788413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5218221448088413,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'make_tables',
]
from sqlalchemy import (
Column,
Table,
# Column types.
BigInteger,
Integer,
LargeBinary,
)
from g1.bases import collections as g1_collections
# Type aliases.
# Revisions, leases, and expirations are stored as 64-bit integers;
# keys and values are stored as raw bytes.
Revision = BigInteger
Key = LargeBinary
Value = LargeBinary
Lease = BigInteger
Expiration = BigInteger
def make_tables(metadata):
    """Define the key-value store's tables on the given metadata.

    Returns a namespace of SQLAlchemy ``Table`` objects.
    """
    return g1_collections.Namespace(
        # Store the current revision of the keyspace. This is not
        # compacted and is monotonically increasing. This table should
        # have at most one row at any given time.
        current_revision=Table(
            'current_revision',
            metadata,
            Column('revision', Revision, primary_key=True),
        ),
        # Store the current keyspace, which is denormalized from the
        # revisions table so that the current keyspace is never
        # compacted. Also this table supports faster lookup in common
        # use cases.
        keyspace=Table(
            'keyspace',
            metadata,
            Column('key_id', Integer, primary_key=True),
            Column('revision', Revision, nullable=False),
            Column('key', Key, nullable=False, index=True, unique=True),
            Column('value', Value, nullable=False),
        ),
        # Store revisions (create/update/delete) of pairs. Note that
        # this they are not revisions of keyspaces and they may get
        # compacted; so the history may be incomplete.
        revisions=Table(
            'revisions',
            metadata,
            Column('revision', Revision, primary_key=True),
            Column('key', Key, primary_key=True),
            # NULL value marks a deletion at this revision.
            Column('value', Value),
        ),
        # Leases and their expiration times.
        leases=Table(
            'leases',
            metadata,
            Column('lease', Lease, primary_key=True),
            Column('expiration', Expiration, nullable=False, index=True),
        ),
        # Many-to-many association between leases and keyspace entries.
        leases_key_ids=Table(
            'leases_key_ids',
            metadata,
            Column('lease', Lease, primary_key=True),
            Column('key_id', Integer, primary_key=True),
        ),
    )
| {
"repo_name": "clchiou/garage",
"path": "py/g1/operations/databases/servers/g1/operations/databases/servers/schemas.py",
"copies": "1",
"size": "2117",
"license": "mit",
"hash": -6165557596501174000,
"line_mean": 30.1323529412,
"line_max": 73,
"alpha_frac": 0.5843174303,
"autogenerated": false,
"ratio": 4.320408163265306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5404725593565306,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.