repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
marcelm/cutadapt | src/cutadapt/align.py | 1 | 6405 | __all__ = [
"EndSkip",
"Aligner",
"PrefixComparer",
"SuffixComparer",
"hamming_sphere",
"hamming_environment",
"edit_environment",
"edit_distance",
]
from enum import IntFlag
from typing import Iterator, Tuple
from cutadapt._align import Aligner, PrefixComparer, SuffixComparer
class EndSkip(IntFlag):
    """
    Flags for the Aligner that indicate which ends of reference or query may be skipped at
    no cost. Setting all four flags at the same time results in semiglobal alignment.
    """

    REFERENCE_START = 1  # a prefix of the reference may be skipped at no cost
    QUERY_START = 2  # a prefix of the query may be skipped at no cost
    REFERENCE_END = 4  # a suffix of the reference may be skipped at no cost
    # NOTE(review): named QUERY_STOP rather than QUERY_END; kept as-is because
    # renaming would break callers that reference the flag by name.
    QUERY_STOP = 8  # a suffix of the query may be skipped at no cost
    SEMIGLOBAL = 15  # all of the above
def edit_distance(s: str, t: str) -> int:
    """
    Return the edit (Levenshtein) distance between the strings s and t.

    The edit distance is the minimal total number of insertions, deletions
    and mismatches needed to transform one string into the other.
    """
    len_s = len(s)
    len_t = len(t)
    # Single-row dynamic programming: row[i] holds the distance between
    # s[:i] and the prefix of t processed so far.
    row = list(range(len_s + 1))
    for j in range(1, len_t + 1):
        # 'diagonal' is the value that sat at row[i - 1] in the previous row.
        diagonal, row[0] = row[0], row[0] + 1
        for i in range(1, len_s + 1):
            substitution_cost = 0 if s[i - 1] == t[j - 1] else 1
            best = min(
                diagonal + substitution_cost,  # substitute (or match)
                row[i] + 1,                    # delete from t
                row[i - 1] + 1,                # insert into t
            )
            diagonal, row[i] = row[i], best
    return row[-1]
def hamming_sphere(s: str, k: int) -> Iterator[str]:
    """
    Yield all strings t whose hamming distance to s is exactly k,
    assuming the alphabet is A, C, G, T.
    """
    assert k >= 0
    if k == 0:
        yield s
        return
    length = len(s)
    # Vary position 'pos' first, then recurse into the suffix for the
    # remaining k - 1 substitutions. Limiting pos to length - k keeps
    # enough room for those substitutions.
    for pos in range(length - k + 1):
        head = s[:pos]
        original = s[pos]
        tail = s[pos + 1:]
        for replacement in "ACGT":
            if replacement == original:
                continue
            for varied_tail in hamming_sphere(tail, k - 1):
                candidate = head + replacement + varied_tail
                assert len(candidate) == length
                yield candidate
def hamming_environment(s: str, k: int) -> Iterator[Tuple[str, int, int]]:
    """
    Find all strings t whose hamming distance to s is at most k,
    assuming the alphabet is A, C, G, T.

    Yield tuples (t, e, m), where e is the hamming distance between s and t
    and m is the number of matches (equal to len(t) - e).
    """
    length = len(s)
    # Enumerate sphere by sphere, from distance 0 up to distance k.
    for distance in range(k + 1):
        for variant in hamming_sphere(s, distance):
            yield variant, distance, length - distance
def naive_edit_environment(s: str, k: int) -> Iterator[str]:
    """
    Apply all possible edits up to edit distance k to string s
    and yield the resulting strings.

    A string may be returned more than once.
    """
    yield s
    if k == 0:
        return
    # Take every string reachable with at most k - 1 edits and apply one
    # more insertion, substitution or deletion to it.
    for variant in naive_edit_environment(s, k - 1):
        length = len(variant)
        for ch in "ACGT":
            for pos in range(length):
                head = variant[:pos] + ch
                yield head + variant[pos:]       # insertion before pos
                yield head + variant[pos + 1:]   # substitution at pos
            yield variant + ch                   # insertion at the end
        for pos in range(length):
            yield variant[:pos] + variant[pos + 1:]  # deletion of pos
def edit_environment(s: str, k: int) -> Iterator[Tuple[str, int, int]]:
    """
    Find all strings t for which the edit distance between s and t is at
    most k, assuming the alphabet is A, C, G, T.

    Yield tuples (t, e, score), where e is the edit distance between s and t
    and score is the score of the optimal alignment.
    """
    max_rate = k / len(s) if s else 0
    aligner = Aligner(s, max_error_rate=max_rate, flags=0, min_overlap=len(s))
    already_seen = set()
    # naive_edit_environment may produce duplicates; emit each string once.
    for candidate in naive_edit_environment(s, k):
        if candidate in already_seen:
            continue
        already_seen.add(candidate)
        alignment = aligner.locate(candidate)
        score, errors = alignment[-2:]  # type: ignore
        yield candidate, errors, score
def slow_edit_environment(s: str, k: int) -> Iterator[Tuple[str, int, int]]:
    """
    Find all strings t for which the edit distance between s and t is at most k,
    assuming the alphabet is A, C, G, T.

    Yield tuples (t, e, m), where e is the edit distance between s and t and
    m is the number of matches in the optimal alignment.

    This is slow and only used in testing.
    """
    n = len(s)
    alphabet = "TGCA"
    # Depth-first enumeration of candidate strings t, extending them one
    # character at a time. Each work item carries the last DP row for
    # aligning the current prefix t against s, plus the parallel row of
    # match counts, so rows never need to be recomputed from scratch.
    work_stack = [
        (
            "",
            list(range(n + 1)),  # DP row for the empty prefix: cost j to delete s[:j]
            [0] * (n + 1),       # no matches possible against the empty prefix
        )
    ]
    while work_stack:
        # t is the current prefix
        # costs is a row at index len(t) in the DP matrix
        # matches is a row in the corresponding matrix of the no. of matches
        t, costs, matches = work_stack.pop()
        # The row is the last row of the DP matrix for aligning t against s
        i = len(t)
        if costs[-1] <= k:
            # The costs of an optimal alignment of t against s are at most k,
            # so t is within the edit environment.
            yield t, costs[-1], matches[-1]
        if i == n + k:
            # Last row reached: longer strings cannot be within distance k.
            continue
        # Runtime heuristic: The entries in the DP matrix cannot get lower
        # in subsequent rows, so don't try longer suffixes if all entries are
        # greater than k.
        if min(costs) > k:
            continue
        # compute next row in DP matrix for all characters of the alphabet
        for ch in alphabet:
            # create a new DP matrix row for each character of the alphabet
            next_costs = [0] * (n + 1)
            next_costs[0] = len(t) + 1  # aligning t+ch against the empty prefix of s
            next_matches = [0] * (n + 1)
            for j in range(1, n + 1):
                match = 0 if s[j - 1] == ch else 1
                assert j > 0
                diag = costs[j - 1] + match      # substitution or match
                left = next_costs[j - 1] + 1     # insertion (skip a char of s)
                up = costs[j] + 1                # deletion (extra char in t)
                # Prefer the diagonal move on ties so match counts propagate.
                if diag <= left and diag <= up:
                    c, m = diag, matches[j - 1] + (1 - match)
                elif left <= up:
                    c, m = left, next_matches[j - 1]
                else:
                    c, m = up, matches[j]
                next_costs[j] = c
                next_matches[j] = m
            work_stack.append((t + ch, next_costs, next_matches))
| mit | bc9f5aac950546530eaef25a1c5ba28c | 30.855721 | 90 | 0.537248 | 3.688364 | false | false | false | false |
tooxie/shiva-server | shiva/lyrics/letrascanciones.py | 2 | 2347 | import re
import urllib2
import requests
import lxml.html
from slugify import slugify
from shiva.lyrics import LyricScraper
from shiva.utils import get_logger
log = get_logger()
class MP3Lyrics(LyricScraper):
    """
    Lyrics scraper for letrascanciones.mp3lyrics.org (Python 2).

    Searches the site for the given artist/title, downloads the first
    matching lyrics page, verifies that it actually belongs to the
    requested track, and extracts the plain-text lyrics from the HTML.
    On success ``self.lyrics`` holds the text and ``self.source`` the URL
    it was fetched from.
    """

    def __init__(self, artist, title):
        self.artist = artist
        self.title = title
        self.lyrics = None
        self.source = None
        self.search_url = 'http://letrascanciones.mp3lyrics.org/Buscar/%s'
        # Matches relative links to lyrics pages in the search results.
        self.lyric_url_re = re.compile(r'href="(/[a-z0-9]{1}/[a-z0-9\-]+'
                                       r'/[a-z0-9\-]+/)"')
        # Captures the raw HTML body of the lyrics container.
        self.lyric_re = re.compile(r'<div id="lyrics_text" .*?>(.*?)'
                                   r'</div>', re.M + re.S)
        # Extracts artist and title from the page's <title> tag for the
        # sanity check in check().
        self.title_re = re.compile(r'<title>(?P<title>.*?) Letras de '
                                   r'Canciones de (?P<artist>.*?)</title>')

    def fetch(self):
        """Search for the track and scrape its lyrics.

        Returns None when no result was found, False when the found page
        does not match artist/title, and True on success.
        """
        self.search()
        if not self.source:
            return None
        response = requests.get(self.source)
        self.html = response.text
        if not self.check():
            return False
        log.info('[FOUND] %s' % self.source)
        # NOTE(review): attribute access with no effect; looks like leftover
        # debugging code.
        self.lyric_re.pattern
        lyrics = self.lyric_re.findall(self.html)[0]
        # Strip the "find more" widget and normalize <br> tags to CRs.
        lyrics = re.sub(r'<span id="findmorespan">.*?</span>', '', lyrics)
        lyrics = re.sub(r'<br[ /]?>', '\r', lyrics)
        # Drop everything before the first blank line (page boilerplate).
        lyrics = lyrics[lyrics.find('\r\r'):]
        self.lyrics = lyrics.strip()
        return True

    def search(self):
        """Query the site and store the first result URL in self.source."""
        query = urllib2.quote('%s %s' % (self.artist, self.title))
        log.info('[SEARCH] %s' % (self.search_url % query))
        response = requests.get(self.search_url % query)
        results = self.lyric_url_re.findall(response.text)
        if results:
            self.source = 'http://letrascanciones.mp3lyrics.org%s' % results[0]

    def check(self):
        """Compare slugified artist/title from the page against the request.

        Returns False (and logs the mismatch) when the fetched page belongs
        to a different track.
        """
        match = self.title_re.search(self.html)
        if slugify(match.group('artist')) != slugify(self.artist):
            log.info('%s != %s' % (slugify(match.group('artist')),
                                   slugify(self.artist)))
            return False
        if slugify(match.group('title')) != slugify(self.title):
            log.info('%s != %s' % (slugify(match.group('title')),
                                   slugify(self.title)))
            return False
        return True
| mit | 67fbdbb77698a77430caf9f526a0b9d0 | 29.881579 | 79 | 0.527482 | 3.466765 | false | false | false | false |
tooxie/shiva-server | shiva/media.py | 2 | 7914 | # -*- coding: utf-8 -*-
import os
import urllib2
from flask import current_app as app
from shiva.utils import get_logger
log = get_logger()
class MediaDir(object):
    """This object allows for media configuration. By instantiating a MediaDir
    class you can tell Shiva where to look for the media files and how to serve
    those files. It's possible to configure the system to look for files on a
    directory and serve those files through a different server.

        MediaDir(root='/srv/http', dirs=('/music', '/songs'),
                 url='http://localhost:8080/')

    Given that configuration Shiva will scan the directories /srv/http/music
    and /srv/http/songs for media files, but they will be served through
    http://localhost:8080/music/ and http://localhost:8080/songs/

    If just a dir is provided Shiva will serve it through the same instance.
    This is *NOT* recommended, but is useful for developing.

        MediaDir('/home/fatmike/music')
    """

    def __init__(self, root='/', dirs=tuple(), exclude=tuple(),
                 url='http://127.0.0.1:8001'):
        """If you provide just 1 argument it will be assumed as a path to
        serve. Like:

            MediaDir('/home/fatmike/music')

        However, you can't just provide the dirs argument, you have to define
        several MediaDirs. If the dirs share the same root you can define them
        both at once:

            MediaDir(root='/srv/http', dirs=('/music', '/songs'))

        If you don't provide a ``url`` parameter, 'http://127.0.0.1:8001' will
        be assumed.

        Raises TypeError for every invalid combination of arguments.
        """
        if type(root) not in (str, unicode):
            raise TypeError("The 'root' attribute has to be a string.")

        # MediaDir('/path/to/dir'): a single positional argument is the
        # directory to serve; re-root it at '/'.
        if not dirs and not url:
            dirs = (root,)
            root = '/'

        # MediaDir('/path/to/dir', dirs='sub/path')
        if type(dirs) != tuple:
            raise TypeError("The 'dirs' attribute has to be a tuple.")

        # MediaDir('/path/to/dir', exclude='sub/path')
        if type(exclude) not in (tuple, str, unicode):
            raise TypeError("The 'exclude' attribute has to be tuple or " +
                            'string.')

        # A single exclude string is normalized to a 1-tuple.
        if type(exclude) in (str, unicode):
            exclude = (exclude,)

        # MediaDir(root='/', url='http://localhost')
        if root == '/' and not dirs and url:
            raise TypeError('Please define at least one directory different ' +
                            "from '/'.")

        if url and type(url) not in (str, unicode):
            raise TypeError('URL has to be a string.')

        if url:
            if not root:
                raise TypeError('You need to supply a root directory for ' +
                                'this url.')

        # Normalize slashes: root gets a leading slash and no trailing one,
        # every dir loses its leading slash and gains a trailing one.
        root = self.root_slashes(root)
        dirs = tuple((self.dirs_slashes(d) for d in dirs))
        exclude = tuple((self.dirs_slashes(xd) for xd in exclude))
        if type(url) in (str, unicode) and not url.endswith('/'):
            url += '/'

        for d in dirs:
            if d.startswith('/'):
                raise TypeError("The 'dirs' tuple can't contain an absolute " +
                                'path')
            if root.startswith(d):
                raise TypeError("The 'dirs' tuple must be relative to " +
                                "'%s'." % root)

        self.root = root
        self.dirs = dirs
        self.exclude = exclude
        # Computed lazily by get_excluded_dirs() and cached afterwards.
        self.excluded_dirs = None
        self.url = url

    def root_slashes(self, path):
        """Removes the trailing slash, and makes sure the path begins with a
        slash.
        """
        if path == '/':
            return path
        path = path.rstrip('/')
        if not path.startswith('/'):
            path = '/%s' % path
        return path

    def dirs_slashes(self, path):
        """Removes the first slash, if exists, and makes sure the path has a
        trailing slash.
        """
        path = path.lstrip('/')
        if not path.endswith('/'):
            path += '/'
        return path

    def get_dirs(self):
        """Returns a list containing directories to look for multimedia files.
        """
        dirs = []
        if self.root:
            if self.dirs:
                for _dir in self.dirs:
                    dirs.append(os.path.join(self.root, _dir))
            else:
                dirs.append(self.root)
        else:
            if self.dirs:
                for _dir in self.dirs:
                    dirs.append(_dir)
        return dirs

    def get_excluded_dirs(self):
        """Returns the list of absolute paths excluded from scanning.

        The list is computed from ``exclude`` (relative to root, or to each
        dir when dirs are defined) and cached on first call.
        """
        if type(self.excluded_dirs) is list:
            return self.excluded_dirs
        if not self.exclude:
            return []
        self.excluded_dirs = []
        _xdir = ''
        # MediaDir('/path/to', dirs=('dir',), exclude='sub/path')
        if self.dirs:
            for d in self.dirs:
                for xd in self.exclude:
                    _xdir = self.root_slashes(os.path.join(self.root, d, xd))
                    self.excluded_dirs.append(_xdir)
        else:
            for xd in self.exclude:
                _xdir = self.root_slashes(os.path.join(self.root, xd))
                self.excluded_dirs.append(_xdir)
        return self.excluded_dirs

    def _is_valid_path(self, path):
        """Validates that the given path exists.
        """
        if not os.path.exists(path):
            log.warn("Path '%s' does not exist. Ignoring." % path)
            return False
        return True

    def get_valid_dirs(self):
        """Returns a generator of valid (existing) directories to look for
        multimedia files.
        """
        for path in self.get_dirs():
            if self._is_valid_path(path):
                yield path

    # TODO: Simplify this method and document it better.
    def urlize(self, path):
        """Given a path to a track, returns a URL pointing to it, or None
        when the path lives under none of the media dirs.
        """
        url = None
        dirs = self.get_dirs()
        # FIXME: Be careful, urls could clash here. If there are similar paths
        # under different media dirs we are going to end up with clashing URLs.
        # For example:
        # `/media/dir/flip/keep-rockin/track.mp3` and
        # `/uploads/dir/flip/keep-rockin/track.mp3` will both be urlized to
        # `http://127.0.0.1:8001/flip/keep-rockin/track.mp3`. Maybe the track's
        # id could be included in the URL to discriminate them.
        dirs.append(app.config.get('UPLOAD_PATH'))
        for mdir in dirs:
            if path.startswith(mdir):
                # Remove trailing slash to avoid double-slashed URL.
                url = path[(len(mdir) - 1):]
                url = str(url.encode('utf-8'))
                if self.url:
                    url = ''.join((self.url.rstrip('/'), urllib2.quote(url)))
        return url

    def allowed_to_stream(self, path):
        """Returns True when the path lives under one of the media dirs.
        """
        for mdir in self.get_dirs():
            if path.startswith(mdir):
                return True
        return False
class MimeType(object):
    """Represents a valid mimetype. Holds information like the codecs to be
    used when converting.
    """

    def __init__(self, type, subtype, extension, **kwargs):
        self.type = type
        self.subtype = subtype
        self.extension = extension
        # Codec hints are optional and default to None when absent.
        for codec_attr in ('acodec', 'vcodec'):
            setattr(self, codec_attr, kwargs.get(codec_attr))

    def is_audio(self):
        """True when the main type is 'audio'."""
        return self.type == 'audio'

    def get_audio_codec(self):
        """Codec to use for the audio stream, or None."""
        return getattr(self, 'acodec')

    def get_video_codec(self):
        """Codec to use for the video stream, or None."""
        return getattr(self, 'vcodec')

    def matches(self, mimetype):
        """True when the given mimetype string equals 'type/subtype'."""
        return unicode(mimetype) == self.__repr__()

    def __unicode__(self):
        return u'%s/%s' % (self.type, self.subtype)

    def __repr__(self):
        return self.__unicode__()

    def __str__(self):
        return self.__unicode__()
def get_mimetypes():
    """Return the list of MimeType instances configured for the app.

    Reads the ``MIMETYPES`` key from the Flask application config and
    defaults to an empty list when it is unset.
    """
    return app.config.get('MIMETYPES', [])
| mit | df700fcc387c3b9508ab9d0fea3ad208 | 29.091255 | 79 | 0.550417 | 4.100518 | false | false | false | false |
datasift/datasift-python | datasift/identity.py | 1 | 3489 |
class Identity(object):
    """ Represents the identity API and provides the ability to query it.
        Internal class instantiated as part of the Client object. """

    def __init__(self, request):
        self.request = request.with_prefix('account/identity')

    def list(self, label=None, per_page=20, page=1):
        """ Get a list of identities that have been created

            :param per_page: The number of results per page returned
            :type per_page: int
            :param page: The page number of the results
            :type page: int
            :return: dict of REST API output with headers attached
            :rtype: :class:`~datasift.request.DictResponse`
            :raises: :class:`~datasift.exceptions.DataSiftApiException`,
                :class:`requests.exceptions.HTTPError`
        """
        params = dict(per_page=per_page, page=page)
        if label:
            params['label'] = label
        return self.request.get('', params)

    def get(self, id):
        """ Get the identity for the given ID

            :param id: The ID of the identity to retrieve
            :return: dict of REST API output with headers attached
            :rtype: :class:`~datasift.request.DictResponse`
            :raises: :class:`~datasift.exceptions.DataSiftApiException`,
                :class:`requests.exceptions.HTTPError`
        """
        return self.request.get(str(id))

    def create(self, label, status=None, master=None):
        """ Create an Identity

            :param label: The label to give this new identity
            :param status: The status of this identity. Default: 'active'
            :param master: Represents whether this identity is a master.
                Default: False
            :return: dict of REST API output with headers attached
            :rtype: :class:`~datasift.request.DictResponse`
            :raises: :class:`~datasift.exceptions.DataSiftApiException`,
                :class:`requests.exceptions.HTTPError`
        """
        params = dict(label=label)
        # Only send the optional fields the caller actually provided.
        for key, value in (('status', status), ('master', master)):
            if value:
                params[key] = value
        return self.request.post('', params)

    def update(self, id, label=None, status=None, master=None):
        """ Update an Identity

            :param label: The label to give this new identity
            :param status: The status of this identity. Default: 'active'
            :param master: Represents whether this identity is a master.
                Default: False
            :return: dict of REST API output with headers attached
            :rtype: :class:`~datasift.request.DictResponse`
            :raises: :class:`~datasift.exceptions.DataSiftApiException`,
                :class:`requests.exceptions.HTTPError`
        """
        params = {key: value for key, value in
                  (('label', label), ('status', status), ('master', master))
                  if value}
        return self.request.put(str(id), params)

    def delete(self, id):
        """ Delete an Identity

            :param id: The ID of the identity to delete
            :return: dict of REST API output with headers attached
            :rtype: :class:`~datasift.request.DictResponse`
            :raises: :class:`~datasift.exceptions.DataSiftApiException`,
                :class:`requests.exceptions.HTTPError`
        """
        return self.request.delete(str(id))
| mit | 745081d01c300ef436f6f74dd62b0c7a | 34.969072 | 73 | 0.588994 | 4.615079 | false | false | false | false |
tooxie/shiva-server | shiva/indexer/main.py | 2 | 13223 | # -*- coding: utf-8 -*-
"""Music indexer for the Shiva-Server API.
Index your music collection and (optionally) retrieve album covers and artist
pictures from Last.FM.
Usage:
shiva-indexer [-h] [-v] [-q] [--lastfm] [--hash] [--nometadata] [--reindex]
[--write-every=<num>] [--verbose-sql]
Options:
-h, --help Show this help message and exit
--lastfm Retrieve artist and album covers from Last.FM API.
--hash Hash each file to find (and ignore) duplicates.
--nometadata Don't read file's metadata when indexing.
--reindex Remove all existing data from the database before
indexing.
--write-every=<num> Write to disk and clear cache every <num> tracks
indexed.
--verbose-sql Print every SQL statement. Be careful, it's a little
too verbose.
-v --verbose Show debugging messages about the progress.
-q --quiet Suppress warnings.
"""
# K-Pg
from datetime import datetime
from time import time
import logging
import os
import sys
import traceback
from docopt import docopt
from sqlalchemy import func
from sqlalchemy.exc import OperationalError
from shiva import models as m
from shiva.app import app, db
from shiva.exceptions import MetadataManagerReadError
from shiva.indexer.cache import CacheManager
from shiva.indexer.lastfm import LastFM
from shiva.utils import ignored, get_logger
q = db.session.query
log = get_logger()
class Indexer(object):
    """Walks the configured media directories, reads each track's metadata
    and persists artists, albums and tracks through the SQLAlchemy session.

    Optionally fetches artwork from Last.FM, hashes files to detect
    duplicates, and batches database writes every ``write_every`` tracks.
    (Python 2 code: uses ``unicode``, ``raw_input`` and ``iteritems``.)
    """

    VALID_FILE_EXTENSIONS = (
        'asf', 'wma',  # ASF
        'flac',  # FLAC
        'mp4', 'm4a', 'm4b', 'm4p',  # M4A
        'ape',  # Monkey's Audio
        'mp3',  # MP3
        'mpc', 'mp+', 'mpp',  # Musepack
        'spx',  # Ogg Speex
        'ogg', 'oga',  # Ogg Vorbis / Theora
        'tta',  # True Audio
        'wv',  # WavPack
        'ofr',  # OptimFROG
    )

    def __init__(self, config=None, use_lastfm=False, hash_files=False,
                 no_metadata=False, reindex=False, write_every=0):
        self.config = config
        self.use_lastfm = use_lastfm
        self.hash_files = hash_files
        self.no_metadata = no_metadata
        self.reindex = reindex
        self.write_every = write_every
        self.empty_db = reindex
        # If we are going to have only 1 track in cache at any time we might as
        # well just ignore it completely.
        use_cache = (write_every != 1)
        self.cache = CacheManager(ram_cache=use_cache, use_db=not use_cache)
        self.session = db.session
        self.media_dirs = config.get('MEDIA_DIRS', [])
        self.allowed_extensions = app.config.get('ALLOWED_FILE_EXTENSIONS',
                                                 self.VALID_FILE_EXTENSIONS)
        self._ext = None
        self._meta = None
        # Stats gathered while indexing, reported by print_stats().
        self.track_count = 0
        self.skipped_tracks = 0
        self.count_by_extension = {}
        for extension in self.allowed_extensions:
            self.count_by_extension[extension] = 0
        if self.use_lastfm:
            self.lastfm = LastFM(api_key=config['LASTFM_API_KEY'],
                                 use_cache=(write_every > 1))
        if not len(self.media_dirs):
            log.error("Remember to set the MEDIA_DIRS option, otherwise I "
                      "don't know where to look for.")
        if reindex:
            log.info('Dropping database...')
            # Destructive operation: ask the user before wiping everything.
            confirmed = raw_input('This will destroy all the information. '
                                  'Proceed? [y/N] ') in ('y', 'Y')
            if not confirmed:
                log.error('Aborting.')
                sys.exit(1)
            db.drop_all()
            log.info('Recreating database...')
            db.create_all()
        # This is useful to know if the DB is empty, and avoid some checks
        if not self.reindex:
            try:
                m.Track.query.limit(1).all()
            except OperationalError:
                self.empty_db = True

    def get_artist(self, name):
        """Return (creating and caching if needed) the Artist for *name*,
        or None for an empty/non-string name.
        """
        name = name.strip() if type(name) in (str, unicode) else None
        if not name:
            return None
        artist = self.cache.get_artist(name)
        if artist:
            return artist
        artist = m.Artist(name=name, image=self.get_artist_image(name))
        self.session.add(artist)
        self.cache.add_artist(artist)
        return artist

    def get_artist_image(self, name):
        """Return an artist image URL from Last.FM, or None when disabled."""
        if self.use_lastfm:
            return self.lastfm.get_artist_image(name)
        return None

    def get_album(self, name, artist):
        """Return (creating and caching if needed) the Album for *name*,
        or None for an empty name or missing artist.
        """
        name = name.strip() if type(name) in (str, unicode) else None
        if not name or not artist:
            return None
        album = self.cache.get_album(name, artist)
        if album:
            return album
        release_year = self.get_release_year(name, artist)
        cover = self.get_album_cover(name, artist)
        album = m.Album(name=name, year=release_year, cover=cover)
        self.session.add(album)
        self.cache.add_album(album, artist)
        return album

    def get_album_cover(self, album, artist):
        """Return an album cover URL from Last.FM, or None when disabled."""
        if self.use_lastfm:
            return self.lastfm.get_album_cover(album, artist)
        return None

    def get_release_year(self, album, artist):
        """Release year from Last.FM when enabled, otherwise from the
        file's own metadata.
        """
        if self.use_lastfm:
            rdate = self.lastfm.get_release_date(album, artist)
            return rdate.year if rdate else None
        return self.get_metadata_reader().release_year

    def add_to_session(self, track):
        """Queue the track for persistence and update per-extension stats."""
        self.session.add(track)
        ext = self.get_extension()
        self.count_by_extension[ext] += 1
        log.info('[ OK ] %s' % track.path)
        return True

    def skip(self, reason=None, print_traceback=None):
        """Count the current file as skipped and log why (at INFO level)."""
        self.skipped_tracks += 1
        if log.getEffectiveLevel() <= logging.INFO:
            _reason = ' (%s)' % reason if reason else ''
            log.info('[ SKIPPED ] %s%s' % (self.file_path, _reason))
            if print_traceback:
                log.info(traceback.format_exc())
        return True

    def commit(self, force=False):
        """Flush the session to disk.

        Without ``force``, commits only when batching is enabled
        (``write_every``) and the current track count hits the batch size.
        """
        if not force:
            if not self.write_every:
                return False
            if self.track_count % self.write_every != 0:
                return False
        log.debug('Writing to database...')
        self.session.commit()
        if self.write_every > 1:
            log.debug('Clearing cache')
            self.cache.clear()
            if self.use_lastfm:
                self.lastfm.clear_cache()
        return True

    def save_track(self):
        """
        Takes a path to a track, reads its metadata and stores everything in
        the database.
        """
        try:
            full_path = self.file_path.decode('utf-8')
        except UnicodeDecodeError:
            self.skip('Unrecognized encoding', print_traceback=True)
            # If file name is in an strange encoding ignore it.
            return False
        try:
            track = m.Track(full_path, no_metadata=self.no_metadata,
                            hash_file=self.hash_files)
        except MetadataManagerReadError:
            self.skip('Corrupted file', print_traceback=True)
            # If the metadata manager can't read the file, it's probably not an
            # actual music file, or it's corrupted. Ignore it.
            return False
        if not self.empty_db:
            # Skip files that were already indexed on a previous run.
            if q(m.Track).filter_by(path=full_path).count():
                self.skip()
                return True
        if self.hash_files:
            if self.cache.hash_exists(track.hash):
                self.skip('Duplicated file')
                return True
        if self.no_metadata:
            self.add_to_session(track)
            return True
        meta = self.set_metadata_reader(track)
        artist = self.get_artist(meta.artist)
        album = self.get_album(meta.album, artist)
        if album:
            track.albums.append(album)
        if artist:
            track.artists.append(artist)
        self.add_to_session(track)
        self.cache.add_hash(track.hash)
        self.commit()

    def get_metadata_reader(self):
        """Return the metadata reader of the track currently being indexed."""
        return self._meta

    def set_metadata_reader(self, track):
        """Remember and return the metadata reader for *track*."""
        self._meta = track.get_metadata_reader()
        return self._meta

    def get_extension(self):
        """Lower-cased extension of the current file (without the dot)."""
        return self.file_path.rsplit('.', 1)[1].lower()

    def is_track(self):
        """Try to guess whether the file is a valid track or not."""
        if not os.path.isfile(self.file_path):
            return False
        if '.' not in self.file_path:
            return False
        ext = self.get_extension()
        if ext not in self.VALID_FILE_EXTENSIONS:
            log.debug('[ SKIPPED ] %s (Unrecognized extension)' %
                      self.file_path)
            return False
        elif ext not in self.allowed_extensions:
            log.debug('[ SKIPPED ] %s (Ignored extension)' % self.file_path)
            return False
        return True

    def walk(self, target, exclude=tuple()):
        """Recursively walks through a directory looking for tracks."""
        _ignored = ''
        if not os.path.isdir(target):
            return False
        for root, dirs, files in os.walk(target):
            # Once a directory is excluded, skip its whole subtree too.
            if _ignored and root.startswith(_ignored):
                # Is there a nicer way to express this?
                continue
            if root in exclude:
                log.debug('[ SKIPPED ] %s (Excluded by config)' % root)
                _ignored = root
                continue
            for name in files:
                self.file_path = os.path.join(root, name)
                if self.is_track():
                    self.track_count += 1
                    self.save_track()

    def _make_unique(self, model):
        """
        Retrieves all repeated slugs for a given model and appends the
        instance's primary key to it.
        """
        slugs = q(model).group_by(model.slug).\
            having(func.count(model.slug) > 1)
        for row in slugs:
            # NOTE(review): 'slug' is assigned but never used.
            slug = row.slug
            for instance in q(model).filter_by(slug=row.slug):
                instance.slug += '-%s' % instance.pk
        return slugs

    # SELECT pk, slug, COUNT(*) FROM tracks GROUP BY slug HAVING COUNT(*) > 1;
    def make_slugs_unique(self):
        """De-duplicate artist and track slugs and commit the changes."""
        query = self._make_unique(m.Artist)
        self.session.add_all(query)
        query = self._make_unique(m.Track)
        self.session.add_all(query)
        self.session.commit()

    def print_stats(self):
        """Log a summary: elapsed time, counts and per-extension totals."""
        if self.track_count == 0:
            log.info('\nNo track indexed.\n')
            return True
        elapsed_time = self.final_time - self.initial_time
        log.info('\nRun in %d seconds. Avg %.3fs/track.' % (
            elapsed_time,
            (elapsed_time / self.track_count)))
        log.info('Found %d tracks. Skipped: %d. Indexed: %d.' % (
            self.track_count,
            self.skipped_tracks,
            (self.track_count - self.skipped_tracks)))
        for extension, count in self.count_by_extension.iteritems():
            if count:
                log.info('%s: %d tracks' % (extension, count))

    def run(self):
        """Index every valid directory of every configured MediaDir."""
        self.initial_time = time()
        for mobject in self.media_dirs:
            for mdir in mobject.get_valid_dirs():
                self.walk(mdir, exclude=mobject.get_excluded_dirs())
        self.final_time = time()
def main():
    """Command-line entry point: parse arguments, run the indexer and
    report statistics.
    """
    arguments = docopt(__doc__)
    if arguments['--quiet']:
        log.setLevel(logging.ERROR)
    elif arguments['--verbose']:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    if arguments['--verbose-sql']:
        app.config['SQLALCHEMY_ECHO'] = True
    kwargs = {
        'use_lastfm': arguments['--lastfm'],
        'hash_files': arguments['--hash'],
        'no_metadata': arguments['--nometadata'],
        'reindex': arguments['--reindex'],
        'write_every': arguments['--write-every'],
    }
    # Without metadata there is nothing to look up on Last.FM.
    if kwargs['no_metadata']:
        kwargs['use_lastfm'] = False
    if kwargs['use_lastfm'] and not app.config.get('LASTFM_API_KEY'):
        sys.stderr.write('ERROR: You need a Last.FM API key if you set the '
                         '--lastfm flag.\n')
        sys.exit(2)
    try:
        if kwargs['write_every'] is not None:
            kwargs['write_every'] = int(kwargs['write_every'])
    # BUGFIX: int() raises ValueError (not TypeError) for a malformed
    # string such as --write-every=abc; catch both so the user gets the
    # friendly error message instead of a traceback.
    except (TypeError, ValueError):
        error_values = (kwargs['write_every'], type(kwargs['write_every']))
        sys.stderr.write('ERROR: Invalid value for --write-every, expected '
                         '<int>, got "%s" <%s> instead.\n' % error_values)
        sys.exit(3)
    # Generate database
    db.create_all()
    lola = Indexer(app.config, **kwargs)
    lola.run()
    lola.print_stats()
    # Petit performance hack: Every track will be added to the session but they
    # will be written down to disk only once, at the end. Unless the
    # --write-every flag is set, then tracks are persisted in batch.
    lola.commit(force=True)
    log.debug('Checking for duplicated tracks...')
    lola.make_slugs_unique()
| mit | bf0ae94ec4201a99136b96f3260faf9f | 29.608796 | 79 | 0.559933 | 3.962541 | false | false | false | false |
xgcm/xgcm | xgcm/comodo.py | 1 | 4763 | from collections import OrderedDict
# Representation of axis shifts
axis_shift_left = -0.5
axis_shift_right = 0.5
axis_shift_center = 0
# Characterizes valid shifts only
valid_axis_shifts = [axis_shift_left, axis_shift_right, axis_shift_center]
def assert_valid_comodo(ds):
    """Verify that the dataset meets comodo conventions

    Parameters
    ----------
    ds : xarray.dataset
    """
    # TODO: implement. This is a stub that currently accepts every dataset.
    assert True
def get_all_axes(ds):
    """Return the set of comodo axis names declared on the dataset's
    dimension coordinates (the value of each coord's ``axis`` attribute).
    """
    return {ds[dim].attrs["axis"] for dim in ds.dims if "axis" in ds[dim].attrs}
def get_axis_coords(ds, axis_name):
    """Find the name of the coordinates associated with a comodo axis.

    Parameters
    ----------
    ds : xarray.dataset or xarray.dataarray
    axis_name : str
        The name of the axis to find (e.g. 'X')

    Returns
    -------
    coord_name : list
        The names of the coordinate matching that axis
    """
    # A dimension belongs to the axis when its 'axis' attribute matches.
    return [dim for dim in ds.dims
            if ds[dim].attrs.get("axis") == axis_name]
def get_axis_positions_and_coords(ds, axis_name):
    """Classify the coordinates of a comodo axis by grid position.

    Returns an OrderedDict mapping position names ('center', and any of
    'left', 'right', 'inner', 'outer') to coordinate names, based on each
    coordinate's length relative to the center coordinate and its
    ``c_grid_axis_shift`` attribute.

    Raises ValueError when no coordinates exist for the axis, when zero or
    more than one center coordinate is found, or when a shifted coordinate
    has an incompatible length or an invalid shift value.
    """
    coord_names = get_axis_coords(ds, axis_name)
    ncoords = len(coord_names)
    if ncoords == 0:
        # didn't find anything for this axis
        raise ValueError("Couldn't find any coordinates for axis %s" % axis_name)

    # now figure out what type of coordinates these are:
    # center, left, right, or outer
    coords = {name: ds[name] for name in coord_names}

    # some tortured logic for dealing with malformed c_grid_axis_shift
    # attributes such as produced by old versions of xmitgcm.
    # This should be a float (either -0.5 or 0.5)
    # this function returns that, or True if the attribute is set to
    # anything at all
    def _maybe_fix_type(attr):
        if attr is not None:
            # BUGFIX: float() raises ValueError (not TypeError) for a
            # non-numeric string attribute; catch both so any unparseable
            # value is treated as "set to something" (True) per the intent
            # above, instead of crashing.
            try:
                return float(attr)
            except (TypeError, ValueError):
                return True

    axis_shift = {
        name: _maybe_fix_type(coord.attrs.get("c_grid_axis_shift"))
        for name, coord in coords.items()
    }
    coord_len = {name: len(coord) for name, coord in coords.items()}

    # look for the center coord, which is required
    # this list will potentially contain "center", "inner", and "outer" points
    coords_without_axis_shift = {
        name: coord_len[name] for name, shift in axis_shift.items() if not shift
    }
    if len(coords_without_axis_shift) == 0:
        raise ValueError("Couldn't find a center coordinate for axis %s" % axis_name)
    elif len(coords_without_axis_shift) > 1:
        raise ValueError(
            "Found two coordinates without "
            "`c_grid_axis_shift` attribute for axis %s" % axis_name
        )
    center_coord_name = list(coords_without_axis_shift)[0]
    # the length of the center coord is key to decoding the other coords
    axis_len = coord_len[center_coord_name]

    # now we can start filling in the information about the different coords
    axis_coords = OrderedDict()
    axis_coords["center"] = center_coord_name

    # now check the other coords
    coord_names.remove(center_coord_name)
    for name in coord_names:
        shift = axis_shift[name]
        clen = coord_len[name]
        # outer/inner are identified by length alone; left/right need the
        # shift value as a tie breaker since their length matches the center.
        if clen == axis_len + 1:
            axis_coords["outer"] = name
        elif clen == axis_len - 1:
            axis_coords["inner"] = name
        elif shift == axis_shift_left:
            if clen == axis_len:
                axis_coords["left"] = name
            else:
                raise ValueError(
                    "Left coordinate %s has incompatible "
                    "length %g (axis_len=%g)" % (name, clen, axis_len)
                )
        elif shift == axis_shift_right:
            if clen == axis_len:
                axis_coords["right"] = name
            else:
                raise ValueError(
                    "Right coordinate %s has incompatible "
                    "length %g (axis_len=%g)" % (name, clen, axis_len)
                )
        else:
            if shift not in valid_axis_shifts:
                # string representing valid axis shifts
                valids = str(valid_axis_shifts)[1:-1]
                raise ValueError(
                    "Coordinate %s has invalid "
                    "`c_grid_axis_shift` attribute `%s`. "
                    "`c_grid_axis_shift` must be one of: %s"
                    % (name, repr(shift), valids)
                )
            else:
                raise ValueError(
                    "Coordinate %s has missing "
                    "`c_grid_axis_shift` attribute `%s`" % (name, repr(shift))
                )
    return axis_coords
def _assert_data_on_grid(da):
    # Placeholder: grid-membership validation is not implemented yet.
    pass
| mit | f52c0412066cf85b2016c4e4806cf958 | 31.182432 | 85 | 0.569809 | 3.900901 | false | false | false | false |
samuelcolvin/pydantic | pydantic/mypy.py | 1 | 29756 | from configparser import ConfigParser
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type as TypingType, Union
from mypy.errorcodes import ErrorCode
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
ARG_STAR2,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
ClassDef,
Context,
Decorator,
EllipsisExpr,
FuncBase,
FuncDef,
JsonDict,
MemberExpr,
NameExpr,
PassStmt,
PlaceholderNode,
RefExpr,
StrExpr,
SymbolNode,
SymbolTableNode,
TempNode,
TypeInfo,
TypeVarExpr,
Var,
)
from mypy.options import Options
from mypy.plugin import CheckerPluginInterface, ClassDefContext, MethodContext, Plugin, SemanticAnalyzerPluginInterface
from mypy.plugins import dataclasses
from mypy.semanal import set_callable_name # type: ignore
from mypy.server.trigger import make_wildcard_trigger
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
Type,
TypeOfAny,
TypeType,
TypeVarType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars
from mypy.util import get_unique_redefinition_name
from mypy.version import __version__ as mypy_version
from pydantic.utils import is_valid_field
try:
from mypy.types import TypeVarDef # type: ignore[attr-defined]
except ImportError: # pragma: no cover
# Backward-compatible with TypeVarDef from Mypy 0.910.
from mypy.types import TypeVarType as TypeVarDef
# Keys and fully-qualified names used throughout the plugin.
CONFIGFILE_KEY = 'pydantic-mypy'
METADATA_KEY = 'pydantic-mypy-metadata'
BASEMODEL_FULLNAME = 'pydantic.main.BaseModel'
BASESETTINGS_FULLNAME = 'pydantic.env_settings.BaseSettings'
FIELD_FULLNAME = 'pydantic.fields.Field'
DATACLASS_FULLNAME = 'pydantic.dataclasses.dataclass'
def _parse_mypy_version(version: str) -> Tuple[int, ...]:
    """
    Return the release portion of a mypy version string as a tuple of ints.

    Handles plain releases ('0.930'), multi-part releases ('1.0.1') and dev
    builds ('0.971+dev.abcdef' -- everything after '+' is ignored).  The
    previous `float(mypy_version)` conversion raised ValueError for any
    version with more than one dot or a local-version suffix.
    """
    return tuple(int(part) for part in version.partition('+')[0].split('.'))
# mypy >= 0.930 exposes the builtins module as 'builtins' instead of '__builtins__'
BUILTINS_NAME = 'builtins' if _parse_mypy_version(mypy_version) >= (0, 930) else '__builtins__'
def plugin(version: str) -> 'TypingType[Plugin]':
    """
    Entry point invoked by mypy to obtain the plugin class.
    `version` is the mypy version string.
    We might want to use this to print a warning if the mypy version being used is
    newer, or especially older, than we expect (or need).
    """
    return PydanticPlugin
class PydanticPlugin(Plugin):
    """Mypy plugin wiring pydantic-specific hooks into the type checker."""

    def __init__(self, options: Options) -> None:
        self.plugin_config = PydanticPluginConfig(options)
        super().__init__(options)

    def get_base_class_hook(self, fullname: str) -> 'Optional[Callable[[ClassDefContext], None]]':
        """Transform every class whose MRO contains pydantic's BaseModel."""
        sym = self.lookup_fully_qualified(fullname)
        if sym and isinstance(sym.node, TypeInfo):  # pragma: no branch
            # No branching may occur if the mypy cache has not been cleared
            is_model = any(get_fullname(base) == BASEMODEL_FULLNAME for base in sym.node.mro)
            if is_model:
                return self._pydantic_model_class_maker_callback
        return None

    def get_method_hook(self, fullname: str) -> Optional[Callable[[MethodContext], Type]]:
        """Check `Model.from_orm(...)` calls for orm_mode."""
        return from_orm_callback if fullname.endswith('.from_orm') else None

    def get_class_decorator_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]:
        """Treat `@pydantic.dataclasses.dataclass` like the stdlib dataclass decorator."""
        if fullname != DATACLASS_FULLNAME:
            return None
        return dataclasses.dataclass_class_maker_callback  # type: ignore[return-value]

    def _pydantic_model_class_maker_callback(self, ctx: ClassDefContext) -> None:
        PydanticModelTransformer(ctx, self.plugin_config).transform()
class PydanticPluginConfig:
    """
    Plugin settings, read either from `pyproject.toml` ([tool.pydantic-mypy])
    or from an ini-style mypy config file ([pydantic-mypy] section).
    Every flag defaults to False when absent.
    """
    __slots__ = ('init_forbid_extra', 'init_typed', 'warn_required_dynamic_aliases', 'warn_untyped_fields')
    init_forbid_extra: bool
    init_typed: bool
    warn_required_dynamic_aliases: bool
    warn_untyped_fields: bool

    def __init__(self, options: Options) -> None:
        if options.config_file is None:  # pragma: no cover
            return
        toml_config = parse_toml(options.config_file)
        if toml_config is None:
            # ini-style config: pull each flag from the [pydantic-mypy] section
            parser = ConfigParser()
            parser.read(options.config_file)
            for key in self.__slots__:
                setattr(self, key, parser.getboolean(CONFIGFILE_KEY, key, fallback=False))
        else:
            section = toml_config.get('tool', {}).get('pydantic-mypy', {})
            for key in self.__slots__:
                value = section.get(key, False)
                if not isinstance(value, bool):
                    raise ValueError(f'Configuration value must be a boolean for key: {key}')
                setattr(self, key, value)
def from_orm_callback(ctx: MethodContext) -> Type:
    """
    Raise an error if orm_mode is not enabled
    """
    model_type: Instance
    ctx_type = ctx.type
    if isinstance(ctx_type, CallableType) and isinstance(ctx_type.ret_type, Instance):
        # called on the class itself
        model_type = ctx_type.ret_type
    elif isinstance(ctx_type, Instance):
        # called on an instance (unusual, but still valid)
        model_type = ctx_type
    else:  # pragma: no cover
        detail = f'ctx.type: {ctx.type} (of type {ctx.type.__class__.__name__})'
        error_unexpected_behavior(detail, ctx.api, ctx.context)
        return ctx.default_return_type
    pydantic_metadata = model_type.type.metadata.get(METADATA_KEY)
    if pydantic_metadata is None:
        # not a class this plugin analyzed; nothing to check
        return ctx.default_return_type
    if pydantic_metadata.get('config', {}).get('orm_mode') is not True:
        error_from_orm(get_name(model_type.type), ctx.api, ctx.context)
    return ctx.default_return_type
class PydanticModelTransformer:
    """
    Applies pydantic-specific transformations to one BaseModel subclass:
    collects its config and fields, synthesizes `__init__`/`construct`
    signatures, optionally freezes the class, and stores the results in the
    class's mypy metadata so subclasses can reuse them.
    """
    # `Config` attributes the plugin understands; all others are ignored.
    tracked_config_fields: Set[str] = {
        'extra',
        'allow_mutation',
        'frozen',
        'orm_mode',
        'allow_population_by_field_name',
        'alias_generator',
    }
    def __init__(self, ctx: ClassDefContext, plugin_config: PydanticPluginConfig) -> None:
        self._ctx = ctx
        self.plugin_config = plugin_config
    def transform(self) -> None:
        """
        Configures the BaseModel subclass according to the plugin settings.
        In particular:
        * determines the model config and fields,
        * adds a fields-aware signature for the initializer and construct methods
        * freezes the class if allow_mutation = False or frozen = True
        * stores the fields, config, and if the class is settings in the mypy metadata for access by subclasses
        """
        ctx = self._ctx
        info = self._ctx.cls.info
        config = self.collect_config()
        fields = self.collect_fields(config)
        for field in fields:
            if info[field.name].type is None:
                # Field type not resolved yet: ask mypy to re-run this hook in a
                # later semantic-analysis pass (unless this is already the last one).
                if not ctx.api.final_iteration:
                    ctx.api.defer()
        is_settings = any(get_fullname(base) == BASESETTINGS_FULLNAME for base in info.mro[:-1])
        self.add_initializer(fields, config, is_settings)
        self.add_construct_method(fields)
        self.set_frozen(fields, frozen=config.allow_mutation is False or config.frozen is True)
        # Persist what we learned so subclasses (and from_orm checks) can read it back.
        info.metadata[METADATA_KEY] = {
            'fields': {field.name: field.serialize() for field in fields},
            'config': config.set_values_dict(),
        }
    def collect_config(self) -> 'ModelConfigData':
        """
        Collects the values of the config attributes that are used by the plugin, accounting for parent classes.
        """
        ctx = self._ctx
        cls = ctx.cls
        config = ModelConfigData()
        # Settings declared directly on this class's nested `Config` take priority.
        for stmt in cls.defs.body:
            if not isinstance(stmt, ClassDef):
                continue
            if stmt.name == 'Config':
                for substmt in stmt.defs.body:
                    if not isinstance(substmt, AssignmentStmt):
                        continue
                    config.update(self.get_config_update(substmt))
                if (
                    config.has_alias_generator
                    and not config.allow_population_by_field_name
                    and self.plugin_config.warn_required_dynamic_aliases
                ):
                    error_required_dynamic_aliases(ctx.api, stmt)
        for info in cls.info.mro[1:]:  # 0 is the current class
            if METADATA_KEY not in info.metadata:
                continue
            # Each class depends on the set of fields in its ancestors
            ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
            for name, value in info.metadata[METADATA_KEY]['config'].items():
                config.setdefault(name, value)
        return config
    def collect_fields(self, model_config: 'ModelConfigData') -> List['PydanticModelField']:
        """
        Collects the fields for the model, accounting for parent classes
        """
        # First, collect fields belonging to the current class.
        ctx = self._ctx
        cls = self._ctx.cls
        fields = []  # type: List[PydanticModelField]
        known_fields = set()  # type: Set[str]
        for stmt in cls.defs.body:
            if not isinstance(stmt, AssignmentStmt):  # `and stmt.new_syntax` to require annotation
                continue
            lhs = stmt.lvalues[0]
            if not isinstance(lhs, NameExpr) or not is_valid_field(lhs.name):
                continue
            if not stmt.new_syntax and self.plugin_config.warn_untyped_fields:
                error_untyped_fields(ctx.api, stmt)
            # if lhs.name == '__config__':  # BaseConfig not well handled; I'm not sure why yet
            #     continue
            sym = cls.info.names.get(lhs.name)
            if sym is None:  # pragma: no cover
                # This is likely due to a star import (see the dataclasses plugin for a more detailed explanation)
                # This is the same logic used in the dataclasses plugin
                continue
            node = sym.node
            if isinstance(node, PlaceholderNode):  # pragma: no cover
                # See the PlaceholderNode docstring for more detail about how this can occur
                # Basically, it is an edge case when dealing with complex import logic
                # This is the same logic used in the dataclasses plugin
                continue
            if not isinstance(node, Var):  # pragma: no cover
                # Don't know if this edge case still happens with the `is_valid_field` check above
                # but better safe than sorry
                continue
            # x: ClassVar[int] is ignored by dataclasses.
            if node.is_classvar:
                continue
            is_required = self.get_is_required(cls, stmt, lhs)
            alias, has_dynamic_alias = self.get_alias_info(stmt)
            if (
                has_dynamic_alias
                and not model_config.allow_population_by_field_name
                and self.plugin_config.warn_required_dynamic_aliases
            ):
                error_required_dynamic_aliases(ctx.api, stmt)
            fields.append(
                PydanticModelField(
                    name=lhs.name,
                    is_required=is_required,
                    alias=alias,
                    has_dynamic_alias=has_dynamic_alias,
                    line=stmt.line,
                    column=stmt.column,
                )
            )
            known_fields.add(lhs.name)
        # Then merge in inherited fields, keeping ancestor declaration order and
        # letting a redeclared field keep its position from the subclass.
        all_fields = fields.copy()
        for info in cls.info.mro[1:]:  # 0 is the current class, -2 is BaseModel, -1 is object
            if METADATA_KEY not in info.metadata:
                continue
            superclass_fields = []
            # Each class depends on the set of fields in its ancestors
            ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
            for name, data in info.metadata[METADATA_KEY]['fields'].items():
                if name not in known_fields:
                    field = PydanticModelField.deserialize(info, data)
                    known_fields.add(name)
                    superclass_fields.append(field)
                else:
                    (field,) = (a for a in all_fields if a.name == name)
                    all_fields.remove(field)
                    superclass_fields.append(field)
            all_fields = superclass_fields + all_fields
        return all_fields
    def add_initializer(self, fields: List['PydanticModelField'], config: 'ModelConfigData', is_settings: bool) -> None:
        """
        Adds a fields-aware `__init__` method to the class.
        The added `__init__` will be annotated with types vs. all `Any` depending on the plugin settings.
        """
        ctx = self._ctx
        typed = self.plugin_config.init_typed
        use_alias = config.allow_population_by_field_name is not True
        # Settings may come from the environment, and alias-generated names are
        # unknown statically, so in those cases no argument can be required.
        force_all_optional = is_settings or bool(
            config.has_alias_generator and not config.allow_population_by_field_name
        )
        init_arguments = self.get_field_arguments(
            fields, typed=typed, force_all_optional=force_all_optional, use_alias=use_alias
        )
        if not self.should_init_forbid_extra(fields, config):
            var = Var('kwargs')
            init_arguments.append(Argument(var, AnyType(TypeOfAny.explicit), None, ARG_STAR2))
        add_method(ctx, '__init__', init_arguments, NoneType())
    def add_construct_method(self, fields: List['PydanticModelField']) -> None:
        """
        Adds a fully typed `construct` classmethod to the class.
        Similar to the fields-aware __init__ method, but always uses the field names (not aliases),
        and does not treat settings fields as optional.
        """
        ctx = self._ctx
        set_str = ctx.api.named_type(f'{BUILTINS_NAME}.set', [ctx.api.named_type(f'{BUILTINS_NAME}.str')])
        optional_set_str = UnionType([set_str, NoneType()])
        fields_set_argument = Argument(Var('_fields_set', optional_set_str), optional_set_str, None, ARG_OPT)
        construct_arguments = self.get_field_arguments(fields, typed=True, force_all_optional=False, use_alias=False)
        construct_arguments = [fields_set_argument] + construct_arguments
        obj_type = ctx.api.named_type(f'{BUILTINS_NAME}.object')
        # A self-type type variable makes `construct` return the subclass it is called on.
        self_tvar_name = '_PydanticBaseModel'  # Make sure it does not conflict with other names in the class
        tvar_fullname = ctx.cls.fullname + '.' + self_tvar_name
        tvd = TypeVarDef(self_tvar_name, tvar_fullname, -1, [], obj_type)
        self_tvar_expr = TypeVarExpr(self_tvar_name, tvar_fullname, [], obj_type)
        ctx.cls.info.names[self_tvar_name] = SymbolTableNode(MDEF, self_tvar_expr)
        # Backward-compatible with TypeVarDef from Mypy 0.910.
        if isinstance(tvd, TypeVarType):
            self_type = tvd
        else:
            self_type = TypeVarType(tvd)  # type: ignore[call-arg]
        add_method(
            ctx,
            'construct',
            construct_arguments,
            return_type=self_type,
            self_type=self_type,
            tvar_def=tvd,
            is_classmethod=True,
        )
    def set_frozen(self, fields: List['PydanticModelField'], frozen: bool) -> None:
        """
        Marks all fields as properties so that attempts to set them trigger mypy errors.
        This is the same approach used by the attrs and dataclasses plugins.
        """
        info = self._ctx.cls.info
        for field in fields:
            sym_node = info.names.get(field.name)
            if sym_node is not None:
                var = sym_node.node
                assert isinstance(var, Var)
                var.is_property = frozen
            else:
                # Inherited field with no symbol on this class: synthesize one.
                var = field.to_var(info, use_alias=False)
                var.info = info
                var.is_property = frozen
                var._fullname = get_fullname(info) + '.' + get_name(var)
                info.names[get_name(var)] = SymbolTableNode(MDEF, var)
    def get_config_update(self, substmt: AssignmentStmt) -> Optional['ModelConfigData']:
        """
        Determines the config update due to a single statement in the Config class definition.
        Warns if a tracked config attribute is set to a value the plugin doesn't know how to interpret (e.g., an int)
        """
        lhs = substmt.lvalues[0]
        if not (isinstance(lhs, NameExpr) and lhs.name in self.tracked_config_fields):
            return None
        if lhs.name == 'extra':
            # `extra` may be a string literal or a member of the Extra enum.
            if isinstance(substmt.rvalue, StrExpr):
                forbid_extra = substmt.rvalue.value == 'forbid'
            elif isinstance(substmt.rvalue, MemberExpr):
                forbid_extra = substmt.rvalue.name == 'forbid'
            else:
                error_invalid_config_value(lhs.name, self._ctx.api, substmt)
                return None
            return ModelConfigData(forbid_extra=forbid_extra)
        if lhs.name == 'alias_generator':
            has_alias_generator = True
            if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname == 'builtins.None':
                has_alias_generator = False
            return ModelConfigData(has_alias_generator=has_alias_generator)
        # All remaining tracked settings must be literal True/False.
        if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname in ('builtins.True', 'builtins.False'):
            return ModelConfigData(**{lhs.name: substmt.rvalue.fullname == 'builtins.True'})
        error_invalid_config_value(lhs.name, self._ctx.api, substmt)
        return None
    @staticmethod
    def get_is_required(cls: ClassDef, stmt: AssignmentStmt, lhs: NameExpr) -> bool:
        """
        Returns a boolean indicating whether the field defined in `stmt` is a required field.
        """
        expr = stmt.rvalue
        if isinstance(expr, TempNode):
            # TempNode means annotation-only, so only non-required if Optional
            value_type = get_proper_type(cls.info[lhs.name].type)
            if isinstance(value_type, UnionType) and any(isinstance(item, NoneType) for item in value_type.items):
                # Annotated as Optional, or otherwise having NoneType in the union
                return False
            return True
        if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME:
            # The "default value" is a call to `Field`; at this point, the field is
            # only required if default is Ellipsis (i.e., `field_name: Annotation = Field(...)`)
            return len(expr.args) > 0 and expr.args[0].__class__ is EllipsisExpr
        # Only required if the "default value" is Ellipsis (i.e., `field_name: Annotation = ...`)
        return isinstance(expr, EllipsisExpr)
    @staticmethod
    def get_alias_info(stmt: AssignmentStmt) -> Tuple[Optional[str], bool]:
        """
        Returns a pair (alias, has_dynamic_alias), extracted from the declaration of the field defined in `stmt`.
        `has_dynamic_alias` is True if and only if an alias is provided, but not as a string literal.
        If `has_dynamic_alias` is True, `alias` will be None.
        """
        expr = stmt.rvalue
        if isinstance(expr, TempNode):
            # TempNode means annotation-only
            return None, False
        if not (
            isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME
        ):
            # Assigned value is not a call to pydantic.fields.Field
            return None, False
        for i, arg_name in enumerate(expr.arg_names):
            if arg_name != 'alias':
                continue
            arg = expr.args[i]
            if isinstance(arg, StrExpr):
                return arg.value, False
            else:
                # alias is computed at runtime; cannot be resolved statically
                return None, True
        return None, False
    def get_field_arguments(
        self, fields: List['PydanticModelField'], typed: bool, force_all_optional: bool, use_alias: bool
    ) -> List[Argument]:
        """
        Helper function used during the construction of the `__init__` and `construct` method signatures.
        Returns a list of mypy Argument instances for use in the generated signatures.
        """
        info = self._ctx.cls.info
        arguments = [
            field.to_argument(info, typed=typed, force_optional=force_all_optional, use_alias=use_alias)
            for field in fields
            if not (use_alias and field.has_dynamic_alias)
        ]
        return arguments
    def should_init_forbid_extra(self, fields: List['PydanticModelField'], config: 'ModelConfigData') -> bool:
        """
        Indicates whether the generated `__init__` should get a `**kwargs` at the end of its signature
        We disallow arbitrary kwargs if the extra config setting is "forbid", or if the plugin config says to,
        *unless* a required dynamic alias is present (since then we can't determine a valid signature).
        """
        if not config.allow_population_by_field_name:
            if self.is_dynamic_alias_present(fields, bool(config.has_alias_generator)):
                return False
        if config.forbid_extra:
            return True
        return self.plugin_config.init_forbid_extra
    @staticmethod
    def is_dynamic_alias_present(fields: List['PydanticModelField'], has_alias_generator: bool) -> bool:
        """
        Returns whether any fields on the model have a "dynamic alias", i.e., an alias that cannot be
        determined during static analysis.
        """
        for field in fields:
            if field.has_dynamic_alias:
                return True
        if has_alias_generator:
            # any field without an explicit alias gets a generated (dynamic) one
            for field in fields:
                if field.alias is None:
                    return True
        return False
class PydanticModelField:
    """Plugin-side description of a single field declared on a pydantic model."""

    def __init__(
        self, name: str, is_required: bool, alias: Optional[str], has_dynamic_alias: bool, line: int, column: int
    ):
        self.name = name
        self.is_required = is_required
        self.alias = alias
        self.has_dynamic_alias = has_dynamic_alias
        self.line = line
        self.column = column

    def to_var(self, info: TypeInfo, use_alias: bool) -> Var:
        """Build a mypy `Var` for this field, named by its alias when requested."""
        if use_alias and self.alias is not None:
            var_name = self.alias
        else:
            var_name = self.name
        return Var(var_name, info[self.name].type)

    def to_argument(self, info: TypeInfo, typed: bool, force_optional: bool, use_alias: bool) -> Argument:
        """Build the `Argument` node used in generated `__init__`/`construct` signatures."""
        if typed and info[self.name].type is not None:
            annotation = info[self.name].type
        else:
            annotation = AnyType(TypeOfAny.explicit)
        optional = force_optional or not self.is_required
        return Argument(
            variable=self.to_var(info, use_alias),
            type_annotation=annotation,
            initializer=None,
            kind=ARG_NAMED_OPT if optional else ARG_NAMED,
        )

    def serialize(self) -> JsonDict:
        """Expose the field as plain data for storage in mypy metadata."""
        return self.__dict__

    @classmethod
    def deserialize(cls, info: TypeInfo, data: JsonDict) -> 'PydanticModelField':
        """Rebuild a field from metadata previously produced by `serialize`."""
        return cls(**data)
class ModelConfigData:
    """Mutable record of the pydantic `Config` options tracked by the plugin.

    Every attribute is tri-state: True/False when explicitly configured,
    None when not yet known (so ancestor classes may fill it in later).
    """

    def __init__(
        self,
        forbid_extra: Optional[bool] = None,
        allow_mutation: Optional[bool] = None,
        frozen: Optional[bool] = None,
        orm_mode: Optional[bool] = None,
        allow_population_by_field_name: Optional[bool] = None,
        has_alias_generator: Optional[bool] = None,
    ):
        self.forbid_extra = forbid_extra
        self.allow_mutation = allow_mutation
        self.frozen = frozen
        self.orm_mode = orm_mode
        self.allow_population_by_field_name = allow_population_by_field_name
        self.has_alias_generator = has_alias_generator

    def set_values_dict(self) -> Dict[str, Any]:
        """Return only the options that were explicitly set (non-None)."""
        explicitly_set = {}
        for option, value in self.__dict__.items():
            if value is not None:
                explicitly_set[option] = value
        return explicitly_set

    def update(self, config: Optional['ModelConfigData']) -> None:
        """Overwrite our options with every explicitly-set option of `config`."""
        if config is not None:
            for option, value in config.set_values_dict().items():
                setattr(self, option, value)

    def setdefault(self, key: str, value: Any) -> None:
        """Set `key` to `value` only if it has not been configured yet."""
        if getattr(self, key) is None:
            setattr(self, key, value)
# Error codes emitted by this plugin; users can silence them individually
# via mypy's `disable_error_code` setting.
ERROR_ORM = ErrorCode('pydantic-orm', 'Invalid from_orm call', 'Pydantic')
ERROR_CONFIG = ErrorCode('pydantic-config', 'Invalid config value', 'Pydantic')
ERROR_ALIAS = ErrorCode('pydantic-alias', 'Dynamic alias disallowed', 'Pydantic')
ERROR_UNEXPECTED = ErrorCode('pydantic-unexpected', 'Unexpected behavior', 'Pydantic')
ERROR_UNTYPED = ErrorCode('pydantic-field', 'Untyped field disallowed', 'Pydantic')
def error_from_orm(model_name: str, api: CheckerPluginInterface, context: Context) -> None:
    """Report that `from_orm` was called on a model without `orm_mode = True`."""
    message = f'"{model_name}" does not have orm_mode=True'
    api.fail(message, context, code=ERROR_ORM)
def error_invalid_config_value(name: str, api: SemanticAnalyzerPluginInterface, context: Context) -> None:
    """Report a `Config` attribute whose value the plugin cannot interpret."""
    message = f'Invalid value for "Config.{name}"'
    api.fail(message, context, code=ERROR_CONFIG)
def error_required_dynamic_aliases(api: SemanticAnalyzerPluginInterface, context: Context) -> None:
    """Report an alias that cannot be resolved statically when that is disallowed."""
    api.fail('Required dynamic aliases disallowed', context, code=ERROR_ALIAS)
def error_unexpected_behavior(detail: str, api: CheckerPluginInterface, context: Context) -> None:  # pragma: no cover
    """Report an internal plugin inconsistency and ask the user to file a bug."""
    # Can't think of a good way to test this, but I confirmed it renders as desired by adding to a non-error path
    link = 'https://github.com/samuelcolvin/pydantic/issues/new/choose'
    full_message = (
        f'The pydantic mypy plugin ran into unexpected behavior: {detail}\n'
        f'Please consider reporting this bug at {link} so we can try to fix it!'
    )
    api.fail(full_message, context, code=ERROR_UNEXPECTED)
def error_untyped_fields(api: SemanticAnalyzerPluginInterface, context: Context) -> None:
    """Report a model field declared without a type annotation."""
    api.fail('Untyped fields disallowed', context, code=ERROR_UNTYPED)
def add_method(
    ctx: ClassDefContext,
    name: str,
    args: List[Argument],
    return_type: Type,
    self_type: Optional[Type] = None,
    tvar_def: Optional[TypeVarDef] = None,
    is_classmethod: bool = False,
    is_new: bool = False,
    # is_staticmethod: bool = False,
) -> None:
    """
    Adds a new method to a class.
    This can be dropped if/when https://github.com/python/mypy/issues/7301 is merged
    """
    info = ctx.cls.info
    # First remove any previously generated methods with the same name
    # to avoid clashes and problems in the semantic analyzer.
    if name in info.names:
        sym = info.names[name]
        if sym.plugin_generated and isinstance(sym.node, FuncDef):
            ctx.cls.defs.body.remove(sym.node)
    self_type = self_type or fill_typevars(info)
    if is_classmethod or is_new:
        # class/`__new__` methods receive the class object as first argument
        first = [Argument(Var('_cls'), TypeType.make_normalized(self_type), None, ARG_POS)]
    # elif is_staticmethod:
    #     first = []
    else:
        self_type = self_type or fill_typevars(info)
        first = [Argument(Var('__pydantic_self__'), self_type, None, ARG_POS)]
    args = first + args
    # Build the parallel type/name/kind lists that CallableType expects.
    arg_types, arg_names, arg_kinds = [], [], []
    for arg in args:
        assert arg.type_annotation, 'All arguments must be fully typed.'
        arg_types.append(arg.type_annotation)
        arg_names.append(get_name(arg.variable))
        arg_kinds.append(arg.kind)
    function_type = ctx.api.named_type(f'{BUILTINS_NAME}.function')
    signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type)
    if tvar_def:
        signature.variables = [tvar_def]
    # The synthesized body is a bare `pass`; only the signature matters to mypy.
    func = FuncDef(name, args, Block([PassStmt()]))
    func.info = info
    func.type = set_callable_name(signature, func)
    func.is_class = is_classmethod
    # func.is_static = is_staticmethod
    func._fullname = get_fullname(info) + '.' + name
    func.line = info.line
    # NOTE: we would like the plugin generated node to dominate, but we still
    # need to keep any existing definitions so they get semantically analyzed.
    if name in info.names:
        # Get a nice unique name instead.
        r_name = get_unique_redefinition_name(name, info.names)
        info.names[r_name] = info.names[name]
    if is_classmethod:  # or is_staticmethod:
        # Wrap the function in an explicit `@classmethod` Decorator node.
        func.is_decorated = True
        v = Var(name, func.type)
        v.info = info
        v._fullname = func._fullname
        # if is_classmethod:
        v.is_classmethod = True
        dec = Decorator(func, [NameExpr('classmethod')], v)
        # else:
        # v.is_staticmethod = True
        # dec = Decorator(func, [NameExpr('staticmethod')], v)
        dec.line = info.line
        sym = SymbolTableNode(MDEF, dec)
    else:
        sym = SymbolTableNode(MDEF, func)
    sym.plugin_generated = True
    info.names[name] = sym
    info.defn.defs.body.append(func)
def get_fullname(x: Union[FuncBase, SymbolNode]) -> str:
    """
    Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped.
    """
    fullname = x.fullname
    if callable(fullname):  # pragma: no cover
        # mypy 0.740 exposed this as a method rather than a plain attribute
        return fullname()
    return fullname
def get_name(x: Union[FuncBase, SymbolNode]) -> str:
    """
    Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped.
    """
    short_name = x.name
    if callable(short_name):  # pragma: no cover
        # mypy 0.740 exposed this as a method rather than a plain attribute
        return short_name()
    return short_name
def parse_toml(config_file: str) -> Optional[Dict[str, Any]]:
    """
    Parse `config_file` as TOML and return its contents as a dict.

    Returns None when the file does not have a `.toml` extension or when no
    TOML parser is available.  Parsers are tried in order: the standard
    library `tomllib` (Python 3.11+), then `tomli`, then the legacy `toml`
    package shipped by older mypy versions.
    """
    if not config_file.endswith('.toml'):
        return None
    read_mode = 'rb'
    try:
        from tomllib import load as toml_load  # Python 3.11+ standard library
    except ImportError:
        try:
            from tomli import load as toml_load  # type: ignore[no-redef]
        except ImportError:
            # older versions of mypy have toml as a dependency, not tomli
            read_mode = 'r'
            try:
                from toml import load as toml_load  # type: ignore[no-redef]
            except ImportError:  # pragma: no cover
                import warnings
                warnings.warn('No TOML parser installed, cannot read configuration from `pyproject.toml`.')
                return None
    with open(config_file, read_mode) as rf:
        return toml_load(rf)  # type: ignore[arg-type]
| mit | 2fd696410cb12f00a0fcb91ec9349b41 | 39.265223 | 120 | 0.61964 | 3.914233 | false | true | false | false |
samuelcolvin/pydantic | pydantic/typing.py | 1 | 18217 | import sys
from collections.abc import Callable
from os import PathLike
from typing import ( # type: ignore
TYPE_CHECKING,
AbstractSet,
Any,
Callable as TypingCallable,
ClassVar,
Dict,
ForwardRef,
Generator,
Iterable,
List,
Mapping,
NewType,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
_eval_type,
cast,
get_type_hints,
)
from typing_extensions import Annotated, Final, Literal
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import GenericAlias as TypingGenericAlias # type: ignore
except ImportError:
# python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
TypingGenericAlias = ()
try:
from types import UnionType as TypesUnionType # type: ignore
except ImportError:
# python < 3.10 does not have UnionType (str | int, byte | bool and so on)
TypesUnionType = ()
# `ForwardRef._evaluate` grew a third (frozenset of type params) argument in 3.9;
# pick the matching call shape at import time.
if sys.version_info < (3, 9):
    def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
        # Resolve a ForwardRef against the given namespaces (pre-3.9 signature).
        return type_._evaluate(globalns, localns)
else:
    def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
        # Resolve a ForwardRef against the given namespaces (3.9+ signature).
        # Even though it is the right signature for python 3.9, mypy complains with
        # `error: Too many arguments for "_evaluate" of "ForwardRef"` hence the cast...
        return cast(Any, type_)._evaluate(globalns, localns, set())
if sys.version_info < (3, 9):
    # Ensure we always get all the whole `Annotated` hint, not just the annotated type.
    # For 3.7 to 3.8, `get_type_hints` doesn't recognize `typing_extensions.Annotated`,
    # so it already returns the full annotation
    get_all_type_hints = get_type_hints
else:
    def get_all_type_hints(obj: Any, globalns: Any = None, localns: Any = None) -> Any:
        # On 3.9+ `include_extras=True` is needed to keep `Annotated[...]` metadata.
        return get_type_hints(obj, globalns, localns, include_extras=True)
# Convenience aliases for "any callable" and "zero-argument callable".
AnyCallable = TypingCallable[..., Any]
NoArgAnyCallable = TypingCallable[[], Any]
# Annotated[...] is implemented by returning an instance of one of these classes, depending on
# python/typing_extensions version.
AnnotatedTypeNames = {'AnnotatedMeta', '_AnnotatedAlias'}
# Version-specific `get_origin`: `typing.get_origin` only exists from 3.8.
if sys.version_info < (3, 8):
    def get_origin(t: Type[Any]) -> Optional[Type[Any]]:
        # Return the unsubscripted origin of a generic alias, or None.
        if type(t).__name__ in AnnotatedTypeNames:
            # weirdly this is a runtime requirement, as well as for mypy
            return cast(Type[Any], Annotated)
        return getattr(t, '__origin__', None)
else:
    from typing import get_origin as _typing_get_origin
    def get_origin(tp: Type[Any]) -> Optional[Type[Any]]:
        """
        We can't directly use `typing.get_origin` since we need a fallback to support
        custom generic classes like `ConstrainedList`
        It should be useless once https://github.com/cython/cython/issues/3537 is
        solved and https://github.com/samuelcolvin/pydantic/pull/1753 is merged.
        """
        if type(tp).__name__ in AnnotatedTypeNames:
            return cast(Type[Any], Annotated)  # mypy complains about _SpecialForm
        return _typing_get_origin(tp) or getattr(tp, '__origin__', None)
# Version-specific `get_args`: `typing.get_args` only exists from 3.8, and the
# 3.9+ builtin generics need extra handling for unparametrized aliases.
if sys.version_info < (3, 8):
    from typing import _GenericAlias
    def get_args(t: Type[Any]) -> Tuple[Any, ...]:
        """Compatibility version of get_args for python 3.7.
        Mostly compatible with the python 3.8 `typing` module version
        and able to handle almost all use cases.
        """
        if type(t).__name__ in AnnotatedTypeNames:
            return t.__args__ + t.__metadata__
        if isinstance(t, _GenericAlias):
            res = t.__args__
            if t.__origin__ is Callable and res and res[0] is not Ellipsis:
                # normalize Callable args to ([params...], return_type)
                res = (list(res[:-1]), res[-1])
            return res
        return getattr(t, '__args__', ())
else:
    from typing import get_args as _typing_get_args
    def _generic_get_args(tp: Type[Any]) -> Tuple[Any, ...]:
        """
        In python 3.9, `typing.Dict`, `typing.List`, ...
        do have an empty `__args__` by default (instead of the generic ~T for example).
        In order to still support `Dict` for example and consider it as `Dict[Any, Any]`,
        we retrieve the `_nparams` value that tells us how many parameters it needs.
        """
        if hasattr(tp, '_nparams'):
            return (Any,) * tp._nparams
        # Special case for `tuple[()]`, which used to return ((),) with `typing.Tuple`
        # in python 3.10- but now returns () for `tuple` and `Tuple`.
        # This will probably be clarified in pydantic v2
        try:
            if tp == Tuple[()] or sys.version_info >= (3, 9) and tp == tuple[()]:  # type: ignore[misc]
                return ((),)
        # there is a TypeError when compiled with cython
        except TypeError:  # pragma: no cover
            pass
        return ()
    def get_args(tp: Type[Any]) -> Tuple[Any, ...]:
        """Get type arguments with all substitutions performed.
        For unions, basic simplifications used by Union constructor are performed.
        Examples::
            get_args(Dict[str, int]) == (str, int)
            get_args(int) == ()
            get_args(Union[int, Union[T, int], str][int]) == (int, str)
            get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
            get_args(Callable[[], T][int]) == ([], int)
        """
        if type(tp).__name__ in AnnotatedTypeNames:
            return tp.__args__ + tp.__metadata__
        # the fallback is needed for the same reasons as `get_origin` (see above)
        return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp)
# On Python < 3.9 only `typing` generics exist, and those already turn string
# arguments into ForwardRef; from 3.9 on, builtin generics (PEP 585) keep raw
# strings, so we must convert them ourselves.
if sys.version_info < (3, 9):
    def convert_generics(tp: Type[Any]) -> Type[Any]:
        """Python 3.9 and older only supports generics from `typing` module.
        They convert strings to ForwardRef automatically.
        Examples::
            typing.List['Hero'] == typing.List[ForwardRef('Hero')]
        """
        return tp
else:
    from typing import _UnionGenericAlias  # type: ignore
    from typing_extensions import _AnnotatedAlias
    def convert_generics(tp: Type[Any]) -> Type[Any]:
        """
        Recursively searches for `str` type hints and replaces them with ForwardRef.
        Examples::
            convert_generics(list['Hero']) == list[ForwardRef('Hero')]
            convert_generics(dict['Hero', 'Team']) == dict[ForwardRef('Hero'), ForwardRef('Team')]
            convert_generics(typing.Dict['Hero', 'Team']) == typing.Dict[ForwardRef('Hero'), ForwardRef('Team')]
            convert_generics(list[str | 'Hero'] | int) == list[str | ForwardRef('Hero')] | int
        """
        origin = get_origin(tp)
        if not origin or not hasattr(tp, '__args__'):
            # not a parametrized generic: nothing to convert
            return tp
        args = get_args(tp)
        # typing.Annotated needs special treatment
        if origin is Annotated:
            return _AnnotatedAlias(convert_generics(args[0]), args[1:])
        # recursively replace `str` instances inside of `GenericAlias` with `ForwardRef(arg)`
        converted = tuple(
            ForwardRef(arg) if isinstance(arg, str) and isinstance(tp, TypingGenericAlias) else convert_generics(arg)
            for arg in args
        )
        if converted == args:
            return tp
        elif isinstance(tp, TypingGenericAlias):
            return TypingGenericAlias(origin, converted)
        elif isinstance(tp, TypesUnionType):
            # recreate types.UnionType (PEP604, Python >= 3.10)
            return _UnionGenericAlias(origin, converted)
        else:
            try:
                setattr(tp, '__args__', converted)
            except AttributeError:
                # some aliases expose read-only __args__; leave them untouched
                pass
            return tp
# PEP 604 (`X | Y` producing types.UnionType) only exists from Python 3.10.
if sys.version_info < (3, 10):
    def is_union(tp: Optional[Type[Any]]) -> bool:
        # Only typing.Union exists before 3.10.
        return tp is Union
    WithArgsTypes = (TypingGenericAlias,)
else:
    import types
    import typing
    def is_union(tp: Optional[Type[Any]]) -> bool:
        # Accept both typing.Union and the PEP 604 union type.
        return tp is Union or tp is types.UnionType  # noqa: E721
    WithArgsTypes = (typing._GenericAlias, types.GenericAlias, types.UnionType)
if sys.version_info < (3, 9):
    StrPath = Union[str, PathLike]
else:
    StrPath = Union[str, PathLike]
    # TODO: Once we switch to Cython 3 to handle generics properly
    #  (https://github.com/cython/cython/issues/2753), use following lines instead
    #  of the one above
    # # os.PathLike only becomes subscriptable from Python 3.9 onwards
    # StrPath = Union[str, PathLike[str]]
# Aliases below are only evaluated by static type checkers, never at runtime.
if TYPE_CHECKING:
    from .fields import ModelField
    TupleGenerator = Generator[Tuple[str, Any], None, None]
    DictStrAny = Dict[str, Any]
    DictAny = Dict[Any, Any]
    SetStr = Set[str]
    ListStr = List[str]
    IntStr = Union[int, str]
    AbstractSetIntStr = AbstractSet[IntStr]
    DictIntStrAny = Dict[IntStr, Any]
    MappingIntStrAny = Mapping[IntStr, Any]
    CallableGenerator = Generator[AnyCallable, None, None]
    ReprArgs = Sequence[Tuple[Optional[str], Any]]
    AnyClassMethod = classmethod[Any]
# Public API of this module. ('CallableGenerator' was previously listed twice;
# the duplicate entry has been removed.)
__all__ = (
    'ForwardRef',
    'Callable',
    'AnyCallable',
    'NoArgAnyCallable',
    'NoneType',
    'is_none_type',
    'display_as_type',
    'resolve_annotations',
    'is_callable_type',
    'is_literal_type',
    'all_literal_values',
    'is_namedtuple',
    'is_typeddict',
    'is_new_type',
    'new_type_supertype',
    'is_classvar',
    'is_finalvar',
    'update_field_forward_refs',
    'update_model_forward_refs',
    'TupleGenerator',
    'DictStrAny',
    'DictAny',
    'SetStr',
    'ListStr',
    'IntStr',
    'AbstractSetIntStr',
    'DictIntStrAny',
    'CallableGenerator',
    'ReprArgs',
    'AnyClassMethod',
    'WithArgsTypes',
    'get_args',
    'get_origin',
    'get_sub_types',
    'typing_base',
    'get_all_type_hints',
    'is_union',
    'StrPath',
)
# The three runtime spellings of "no value": None itself, its type, and Literal[None].
NoneType = None.__class__
NONE_TYPES: Tuple[Any, Any, Any] = (None, NoneType, Literal[None])
if sys.version_info < (3, 8):
    # Even though this implementation is slower, we need it for python 3.7:
    # In python 3.7 "Literal" is not a builtin type and uses a different
    # mechanism.
    # for this reason `Literal[None] is Literal[None]` evaluates to `False`,
    # breaking the faster implementation used for the other python versions.
    def is_none_type(type_: Any) -> bool:
        # Membership test falls back to __eq__, which works on 3.7's Literal.
        return type_ in NONE_TYPES
elif sys.version_info[:2] == (3, 8):
    # We can use the fast implementation for 3.8 but there is a very weird bug
    # where it can fail for `Literal[None]`.
    # We just need to redefine a useless `Literal[None]` inside the function body to fix this
    def is_none_type(type_: Any) -> bool:
        Literal[None]  # fix edge case
        for none_type in NONE_TYPES:
            if type_ is none_type:
                return True
        return False
else:
    def is_none_type(type_: Any) -> bool:
        # Identity comparison is the fast path on 3.9+.
        for none_type in NONE_TYPES:
            if type_ is none_type:
                return True
        return False
def display_as_type(v: Type[Any]) -> str:
    """Return a compact human-readable representation of an annotation."""
    if not isinstance(v, (typing_base, WithArgsTypes, type)):
        # Not a recognised annotation object: describe its class instead.
        v = v.__class__

    if is_union(get_origin(v)):
        member_reprs = ", ".join(display_as_type(arg) for arg in get_args(v))
        return f'Union[{member_reprs}]'

    if isinstance(v, WithArgsTypes):
        # Generic alias are constructs like `list[int]`
        return str(v).replace('typing.', '')

    try:
        return v.__name__
    except AttributeError:
        # happens with typing objects
        return str(v).replace('typing.', '')
def resolve_annotations(raw_annotations: Dict[str, Type[Any]], module_name: Optional[str]) -> Dict[str, Type[Any]]:
    """
    Partially taken from typing.get_type_hints.

    Resolve string or ForwardRef annotations into type objects if possible.

    :param raw_annotations: mapping of attribute name -> annotation; values may
        be strings or ForwardRefs as well as real types
    :param module_name: name of the module the annotations were declared in;
        its globals are used as the evaluation namespace when available
    :return: mapping with the same keys, with values resolved where possible
        (unresolvable ForwardRefs are passed through unchanged)
    """
    base_globals: Optional[Dict[str, Any]] = None
    if module_name:
        try:
            module = sys.modules[module_name]
        except KeyError:
            # happens occasionally, see https://github.com/samuelcolvin/pydantic/issues/2363
            pass
        else:
            base_globals = module.__dict__
    annotations = {}
    for name, value in raw_annotations.items():
        if isinstance(value, str):
            # ForwardRef grew an ``is_class`` keyword on 3.9.8+/3.10.1+; it must
            # be passed there so class-level references evaluate correctly
            if (3, 10) > sys.version_info >= (3, 9, 8) or sys.version_info >= (3, 10, 1):
                value = ForwardRef(value, is_argument=False, is_class=True)
            else:
                value = ForwardRef(value, is_argument=False)
        try:
            value = _eval_type(value, base_globals, None)
        except NameError:
            # this is ok, it can be fixed with update_forward_refs
            pass
        annotations[name] = value
    return annotations
def is_callable_type(type_: Type[Any]) -> bool:
    """Return True for ``Callable`` itself or any ``Callable[...]`` alias."""
    if type_ is Callable:
        return True
    return get_origin(type_) is Callable
def is_literal_type(type_: Type[Any]) -> bool:
    """Return True for ``Literal[...]`` annotations."""
    if Literal is None:
        return False
    return get_origin(type_) is Literal
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
    """Return the arguments of a ``Literal[...]`` annotation (one level only)."""
    values = get_args(type_)
    return values
def all_literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
    """
    This method is used to retrieve all Literal values as
    Literal can be used recursively (see https://www.python.org/dev/peps/pep-0586)
    e.g. `Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]`
    """
    if not is_literal_type(type_):
        return (type_,)

    flattened = []
    for value in literal_values(type_):
        flattened.extend(all_literal_values(value))
    return tuple(flattened)
def is_namedtuple(type_: Type[Any]) -> bool:
    """
    Check if a given class is a named tuple.
    It can be either a `typing.NamedTuple` or `collections.namedtuple`
    """
    from .utils import lenient_issubclass

    # namedtuples are tuple subclasses carrying a `_fields` attribute
    if not lenient_issubclass(type_, tuple):
        return False
    return hasattr(type_, '_fields')
def is_typeddict(type_: Type[Any]) -> bool:
    """
    Check if a given class is a typed dict (from `typing` or `typing_extensions`)
    In 3.10, there will be a public method (https://docs.python.org/3.10/library/typing.html#typing.is_typeddict)
    """
    from .utils import lenient_issubclass

    # TypedDicts are dict subclasses carrying a `__total__` attribute
    if not lenient_issubclass(type_, dict):
        return False
    return hasattr(type_, '__total__')
test_type = NewType('test_type', str)
def is_new_type(type_: Type[Any]) -> bool:
"""
Check whether type_ was created using typing.NewType
"""
return isinstance(type_, test_type.__class__) and hasattr(type_, '__supertype__') # type: ignore
def new_type_supertype(type_: Type[Any]) -> Type[Any]:
    """Follow the ``__supertype__`` chain of a ``NewType`` to the concrete type."""
    current = type_
    while hasattr(current, '__supertype__'):
        current = current.__supertype__
    return current
def _check_classvar(v: Optional[Type[Any]]) -> bool:
if v is None:
return False
return v.__class__ == ClassVar.__class__ and getattr(v, '_name', None) == 'ClassVar'
def _check_finalvar(v: Optional[Type[Any]]) -> bool:
"""
Check if a given type is a `typing.Final` type.
"""
if v is None:
return False
return v.__class__ == Final.__class__ and (sys.version_info < (3, 8) or getattr(v, '_name', None) == 'Final')
def is_classvar(ann_type: Type[Any]) -> bool:
    """Return True if the annotation is ``ClassVar`` or ``ClassVar[...]``."""
    if _check_classvar(ann_type):
        return True
    if _check_classvar(get_origin(ann_type)):
        return True

    # this is an ugly workaround for class vars that contain forward references and are therefore themselves
    # forward references, see #3679
    is_fwd_ref = ann_type.__class__ == ForwardRef
    return is_fwd_ref and ann_type.__forward_arg__.startswith('ClassVar[')
def is_finalvar(ann_type: Type[Any]) -> bool:
    """Return True if the annotation is ``Final`` or ``Final[...]``."""
    if _check_finalvar(ann_type):
        return True
    return _check_finalvar(get_origin(ann_type))
def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None:
    """
    Try to update ForwardRefs on fields based on this ModelField, globalns and localns.

    Mutates ``field`` in place; recurses into ``field.sub_fields``.
    """
    if field.type_.__class__ == ForwardRef:
        field.type_ = evaluate_forwardref(field.type_, globalns, localns or None)
        # re-run field preparation so it reflects the freshly resolved type
        field.prepare()
    if field.sub_fields:
        for sub_f in field.sub_fields:
            update_field_forward_refs(sub_f, globalns=globalns, localns=localns)
    if field.discriminator_key is not None:
        field.prepare_discriminated_union_sub_fields()
def update_model_forward_refs(
    model: Type[Any],
    fields: Iterable['ModelField'],
    json_encoders: Dict[Union[Type[Any], str, ForwardRef], AnyCallable],
    localns: 'DictStrAny',
    exc_to_suppress: Tuple[Type[BaseException], ...] = (),
) -> None:
    """
    Try to update model fields ForwardRefs based on model and localns.

    Mutates ``fields`` and ``json_encoders`` in place; exceptions listed in
    ``exc_to_suppress`` are swallowed per-field / per-encoder key.
    """
    if model.__module__ in sys.modules:
        globalns = sys.modules[model.__module__].__dict__.copy()
    else:
        globalns = {}
    # make the model resolvable by its own name (self-references)
    globalns.setdefault(model.__name__, model)
    for f in fields:
        try:
            update_field_forward_refs(f, globalns=globalns, localns=localns)
        except exc_to_suppress:
            pass
    # resolve string / ForwardRef keys of json_encoders to the real classes
    for key in set(json_encoders.keys()):
        if isinstance(key, str):
            fr: ForwardRef = ForwardRef(key)
        elif isinstance(key, ForwardRef):
            fr = key
        else:
            continue
        try:
            new_key = evaluate_forwardref(fr, globalns, localns or None)
        except exc_to_suppress:  # pragma: no cover
            continue
        json_encoders[new_key] = json_encoders.pop(key)
def get_class(type_: Type[Any]) -> Union[None, bool, Type[Any]]:
    """
    Tries to get the class of a Type[T] annotation. Returns True if Type is used
    without brackets. Otherwise returns None.
    """
    if get_origin(type_) is None:
        # plain class / non-Type annotation
        return None

    args = get_args(type_)
    if args and isinstance(args[0], type):
        return args[0]
    return True
def get_sub_types(tp: Any) -> List[Any]:
    """
    Return all the types that are allowed by type `tp`
    `tp` can be a `Union` of allowed types or an `Annotated` type
    """
    origin = get_origin(tp)
    if origin is Annotated:
        # unwrap Annotated[X, ...] and recurse on X
        return get_sub_types(get_args(tp)[0])
    if is_union(origin):
        collected = []
        for member in get_args(tp):
            collected.extend(get_sub_types(member))
        return collected
    return [tp]
| mit | 383fdf69cde3dc6331c69b165cb24e46 | 31.015817 | 117 | 0.6175 | 3.656564 | false | false | false | false |
geopython/pycsw | pycsw/plugins/profiles/ebrim/__init__.py | 72 | 1311 | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
| mit | 4f7ff24e5b71d6bf3063ea81b68e410a | 44.206897 | 67 | 0.684973 | 4.567944 | false | false | false | false |
geopython/pycsw | tests/functionaltests/test_suites_functional.py | 5 | 14215 | # =================================================================
#
# Authors: Ricardo Garcia Silva <ricardo.garcia.silva@gmail.com>
# Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2017 Ricardo Garcia Silva
# Copyright (c) 2017 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
"""Functional tests for several test suites"""
import codecs
from difflib import SequenceMatcher
from difflib import unified_diff
from io import BytesIO
import json
import os
import re
import wsgiref.util
from lxml import etree
from lxml import objectify
import pytest
from pycsw import server
pytestmark = pytest.mark.functional
def test_suites(test_identifier, use_xml_canonicalisation,
                save_results_directory, configuration, request_method,
                request_data, expected_result, normalize_identifier_fields):
    """Test suites.

    This function is automatically parametrized by pytest as a result of the
    ``conftest:pytest_generate_tests`` function. The input parameters are thus
    supplied by pytest as a result of discovering and parsing the existing
    test suites located under ``tests/functionaltests/suites``.

    Parameters
    ----------
    test_identifier: str
        Identifier for the current test; used as the filename when saving
        results to disk.
    configuration: ConfigParser
        The configuration to use with the pycsw server instance under test
    request_method: str
        The HTTP method of the request. Either GET or POST.
    request_data: str
        Either the path to the request file, for POST requests, or the request
        parameters, for GET requests
    expected_result: str
        Path to the file that holds the expected result
    normalize_identifier_fields: bool
        Whether to normalize the identifier fields in responses. This
        parameter is used only in the 'harvesting' and 'manager' suites
    use_xml_canonicalisation: bool
        Whether to compare results with their expected values by using xml
        canonicalisation or simply by doing a diff.
    save_results_directory: str
        Path to a directory where to test results should be saved to. A value
        of ``None`` (the default) means that results will not get saved to
        disk.

    """
    request_environment = _prepare_wsgi_test_environment(request_method,
                                                         request_data)
    # run the request through an in-process pycsw server (no real HTTP)
    pycsw_server = server.Csw(rtconfig=configuration, env=request_environment)
    encoding = "utf-8"
    status, raw_contents = pycsw_server.dispatch_wsgi()
    contents = raw_contents.decode(encoding)
    with codecs.open(expected_result, encoding=encoding) as fh:
        expected = fh.read()
    # mask dynamic elements (timestamps, versions, generated ids) before diffing
    normalized_result = _normalize(
        contents,
        normalize_identifiers=normalize_identifier_fields
    )
    if use_xml_canonicalisation:
        print("Comparing results using XML canonicalization...")
        matches_expected = _compare_with_xml_canonicalisation(
            normalized_result, expected)
    else:
        print("Comparing results using diffs...")
        matches_expected = _compare_without_xml_canonicalisation(
            normalized_result, expected)
    if not matches_expected and use_xml_canonicalisation:
        print("expected: {0}".format(expected))
        print("response: {0}".format(normalized_result))
    if save_results_directory is not None:
        _save_test_result(save_results_directory, normalized_result,
                          test_identifier, encoding)
    assert matches_expected
def _compare_with_xml_canonicalisation(normalized_result, expected):
    """Compare result and expectation via XML c14n, with JSON/diff fallbacks."""
    try:
        return _test_xml_result(normalized_result, expected)
    except etree.XMLSyntaxError:
        # the file is either not XML (perhaps JSON?) or malformed
        return _test_json_result(normalized_result, expected)
    except etree.C14NError:
        print("XML canonicalisation has failed. Trying to compare result "
              "with expected using difflib")
        return _test_xml_diff(normalized_result, expected)
def _compare_without_xml_canonicalisation(normalized_result, expected):
    """Compare result with its expectation using a plain text diff (no c14n)."""
    return _test_xml_diff(normalized_result, expected)
def _prepare_wsgi_test_environment(request_method, request_data):
"""Set up a testing environment for tests.
Parameters
----------
request_method: str
The HTTP method of the request. Sould be either GET or POST.
request_data: str
Either the path to the request file, for POST requests or the request
parameters, for GET requests.
Returns
-------
request_environment: dict
A mapping with the environment variables to use in the test
"""
request_environment = {
"REQUEST_METHOD": request_method.upper(),
"QUERY_STRING": "",
"REMOTE_ADDR": "127.0.0.1"
}
if request_method == "POST":
print("request_path: {0}".format(request_data))
request_buffer = BytesIO()
encoding = "utf-8"
with codecs.open(request_data, encoding=encoding) as fh:
contents = fh.read()
request_buffer.write(contents.encode(encoding))
request_environment["CONTENT_LENGTH"] = request_buffer.tell()
request_buffer.seek(0)
request_environment["wsgi.input"] = request_buffer
else:
print("Request contents: {0}".format(request_data))
request_environment["QUERY_STRING"] = request_data
wsgiref.util.setup_testing_defaults(request_environment)
return request_environment
def _test_xml_result(result, expected, encoding="utf-8"):
    """Compare the XML test results with an expected value.

    This function compares the test result with the expected values by
    performing XML canonicalization (c14n)[1]_, which compares the semantic
    meanings of both XML files.

    Parameters
    ----------
    result: str
        The result of running the test as a unicode string
    expected: str
        The expected outcome as a unicode string.
    encoding: str, optional
        Encoding used to serialize both strings before parsing.

    Returns
    -------
    bool
        Whether the result matches the expectations or not.

    Raises
    ------
    etree.XMLSyntaxError
        If any of the input parameters is not a valid XMl.
    etree.C14NError
        If any of the input parameters cannot be canonicalized. This may
        happen if there are relative namespace URIs in any of the XML
        documents, as they are explicitly not allowed when doing XML c14n

    References
    ----------
    .. [1] http://www.w3.org/TR/xml-c14n

    """
    result_element = etree.fromstring(result.encode(encoding))
    expected_element = etree.fromstring(expected.encode(encoding))
    result_buffer = BytesIO()
    result_tree = result_element.getroottree()
    # drop lxml.objectify annotations/unused namespaces before canonicalizing
    objectify.deannotate(result_tree, cleanup_namespaces=True)
    result_tree.write_c14n(result_buffer)
    expected_buffer = BytesIO()
    expected_tree = expected_element.getroottree()
    objectify.deannotate(expected_tree, cleanup_namespaces=True)
    expected_tree.write_c14n(expected_buffer)
    # canonical byte streams are directly comparable
    matches = result_buffer.getvalue() == expected_buffer.getvalue()
    return matches
def _test_json_result(result, expected):
"""Compare the JSON test results with an expected value.
Parameters
----------
result: str
The result of running the test.
expected: str
The expected outcome.
Returns
-------
bool
Whether the result matches the expectations or not.
"""
result_dict = json.loads(result)
expected_dict = json.loads(expected)
return result_dict == expected_dict
def _test_xml_diff(result, expected):
    """Compare two XML strings by using python's ``difflib.SequenceMatcher``.

    This is a character-by-character comparison and does not take into account
    the semantic meaning of XML elements and attributes.

    Parameters
    ----------
    result: str
        The result of running the test.
    expected: str
        The expected outcome.

    Returns
    -------
    bool
        Whether the result matches the expectations or not.

    """
    similarity = SequenceMatcher(None, result, expected).ratio()
    matches = similarity == pytest.approx(1.0)
    if not matches:
        print("Result does not match expected.")
        delta = unified_diff(result.splitlines(), expected.splitlines())
        print("\n".join(list(delta)))
    return matches
def _normalize(sresult, normalize_identifiers=False):
    """Normalize test output so it can be compared with the expected result.

    Several dynamic elements of a pycsw response (such as time,
    updateSequence, etc) are replaced with static constants to ease comparison.

    Parameters
    ----------
    sresult: str
        The test result as a unicode string.
    normalize_identifiers: bool, optional
        Whether identifier fields should be normalized.

    Returns
    -------
    str
        The normalized response.

    """
    # XML responses
    # only the FIRST occurrence of each single-match pattern is masked
    version = re.search(r'<!-- (.*) -->', sresult)
    updatesequence = re.search(r'updateSequence="(\S+)"', sresult)
    timestamp = re.search(r'timestamp="(.*)"', sresult)
    timestamp2 = re.search(r'timeStamp="(.*)"', sresult)
    timestamp3 = re.search(
        r'<oai:responseDate>(.*)</oai:responseDate>',
        sresult
    )
    timestamp4 = re.search(
        r'<oai:earliestDatestamp>(.*)</oai:earliestDatestamp>',
        sresult
    )
    zrhost = re.search(r'<zr:host>(.*)</zr:host>', sresult)
    zrport = re.search(r'<zr:port>(.*)</zr:port>', sresult)
    elapsed_time = re.search(r'elapsedTime="(.*)"', sresult)
    expires = re.search(r'expires="(.*?)"', sresult)
    atom_updated = re.findall(r'<atom:updated>(.*)</atom:updated>',
                              sresult)
    if version:
        sresult = sresult.replace(version.group(0),
                                  r'<!-- PYCSW_VERSION -->')
    if updatesequence:
        sresult = sresult.replace(updatesequence.group(0),
                                  r'updateSequence="PYCSW_UPDATESEQUENCE"')
    if timestamp:
        sresult = sresult.replace(timestamp.group(0),
                                  r'timestamp="PYCSW_TIMESTAMP"')
    if timestamp2:
        sresult = sresult.replace(timestamp2.group(0),
                                  r'timeStamp="PYCSW_TIMESTAMP"')
    if timestamp3:
        sresult = sresult.replace(
            timestamp3.group(0),
            r'<oai:responseDate>PYCSW_TIMESTAMP</oai:responseDate>'
        )
    if timestamp4:
        sresult = sresult.replace(
            timestamp4.group(0),
            r'<oai:earliestDatestamp>PYCSW_TIMESTAMP</oai:earliestDatestamp>'
        )
    if zrport:
        sresult = sresult.replace(zrport.group(0),
                                  r'<zr:port>PYCSW_PORT</zr:port>')
    if zrhost:
        sresult = sresult.replace(zrhost.group(0),
                                  r'<zr:host>PYCSW_HOST</zr:host>')
    if elapsed_time:
        sresult = sresult.replace(elapsed_time.group(0),
                                  r'elapsedTime="PYCSW_ELAPSED_TIME"')
    if expires:
        sresult = sresult.replace(expires.group(0),
                                  r'expires="PYCSW_EXPIRES"')
    for au in atom_updated:
        sresult = sresult.replace(au, r'PYCSW_TIMESTAMP')
    # for csw:HarvestResponse documents, mask identifiers
    # which are dynamically generated for OWS endpoints
    if sresult.find(r'HarvestResponse') != -1:
        identifier = re.findall(
            r'<dc:identifier>(\S+)</dc:identifier>',
            sresult
        )
        for i in identifier:
            sresult = sresult.replace(i, r'PYCSW_IDENTIFIER')
    # JSON responses
    timestamp = re.search(r'"@timestamp": "(.*?)"', sresult)
    if timestamp:
        sresult = sresult.replace(timestamp.group(0),
                                  r'"@timestamp": "PYCSW_TIMESTAMP"')
    # harvesting-based GetRecords/GetRecordById responses
    if normalize_identifiers:
        # mask generated urn:uuid identifiers in Dublin Core and ISO records
        dcid = re.findall(
            r'<dc:identifier>(urn:uuid.*)</dc:identifier>',
            sresult
        )
        isoid = re.findall(r'id="(urn:uuid.*)"', sresult)
        isoid2 = re.findall(
            r'<gco:CharacterString>(urn:uuid.*)</gco',
            sresult
        )
        for d in dcid:
            sresult = sresult.replace(d, r'PYCSW_IDENTIFIER')
        for i in isoid:
            sresult = sresult.replace(i, r'PYCSW_IDENTIFIER')
        for i2 in isoid2:
            sresult = sresult.replace(i2, r'PYCSW_IDENTIFIER')
    return sresult
def _save_test_result(target_directory_path, test_result, filename, encoding):
"""Save the input test result to disk"""
full_directory_path = os.path.abspath(
os.path.expanduser(os.path.expandvars(target_directory_path)))
try:
os.makedirs(full_directory_path)
except OSError as exc:
if exc.errno == 17: # directory already exists
pass
else:
raise
target_path = os.path.join(full_directory_path, filename)
with codecs.open(target_path, "w", encoding=encoding) as fh:
fh.write(test_result)
return target_path
| mit | be294920d79b0667920ed39756b5ea8a | 34.987342 | 79 | 0.642139 | 4.220606 | false | true | false | false |
geopython/pycsw | pycsw/ogc/csw/csw3.py | 2 | 106038 | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2016 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import json
import os
import sys
import cgi
from time import time
from urllib.parse import quote, unquote
from io import StringIO
from pycsw.core.etree import etree
from pycsw.ogc.csw.cql import cql2fes
from pycsw import oaipmh, opensearch, sru
from pycsw.plugins.profiles import profile as pprofile
import pycsw.plugins.outputschemas
from pycsw.core import config, log, metadata, util
from pycsw.core.formats.fmt_json import xml2dict
from pycsw.ogc.fes import fes1, fes2
import logging
LOGGER = logging.getLogger(__name__)
class Csw3(object):
''' CSW 3.x server '''
    def __init__(self, server_csw):
        ''' Initialize CSW3

        :param server_csw: the owning ``pycsw.server.Csw`` instance, kept as
                           ``self.parent`` for access to config, repository,
                           profiles and request state
        '''
        self.parent = server_csw
        # CSW protocol version implemented by this handler
        self.version = '3.0.0'
def getcapabilities(self):
''' Handle GetCapabilities request '''
serviceidentification = True
serviceprovider = True
operationsmetadata = True
filtercaps = False
languages = False
# validate acceptformats
LOGGER.info('Validating ows20:AcceptFormats')
LOGGER.debug(self.parent.context.model['operations']['GetCapabilities']['parameters']['acceptFormats']['values'])
if 'acceptformats' in self.parent.kvp:
bfound = False
for fmt in self.parent.kvp['acceptformats'].split(','):
if fmt in self.parent.context.model['operations']['GetCapabilities']['parameters']['acceptFormats']['values']:
self.parent.mimetype = fmt
bfound = True
break
if not bfound:
return self.exceptionreport('InvalidParameterValue',
'acceptformats', 'Invalid acceptFormats parameter value: %s' %
self.parent.kvp['acceptformats'])
if 'sections' in self.parent.kvp and self.parent.kvp['sections'] != '':
serviceidentification = False
serviceprovider = False
operationsmetadata = False
for section in self.parent.kvp['sections'].split(','):
if section == 'ServiceIdentification':
serviceidentification = True
if section == 'ServiceProvider':
serviceprovider = True
if section == 'OperationsMetadata':
operationsmetadata = True
if section == 'All':
serviceidentification = True
serviceprovider = True
operationsmetadata = True
filtercaps = True
languages = True
else:
filtercaps = True
languages = True
# check extra parameters that may be def'd by profiles
if self.parent.profiles is not None:
for prof in self.parent.profiles['loaded'].keys():
result = \
self.parent.profiles['loaded'][prof].check_parameters(self.parent.kvp)
if result is not None:
return self.exceptionreport(result['code'],
result['locator'], result['text'])
# @updateSequence: get latest update to repository
try:
updatesequence = \
util.get_time_iso2unix(self.parent.repository.query_insert())
except:
updatesequence = None
node = etree.Element(util.nspath_eval('csw30:Capabilities',
self.parent.context.namespaces),
nsmap=self.parent.context.namespaces, version='3.0.0',
updateSequence=str(updatesequence))
if 'updatesequence' in self.parent.kvp:
if int(self.parent.kvp['updatesequence']) == updatesequence:
return node
elif int(self.parent.kvp['updatesequence']) > updatesequence:
return self.exceptionreport('InvalidUpdateSequence',
'updatesequence',
'outputsequence specified (%s) is higher than server\'s \
updatesequence (%s)' % (self.parent.kvp['updatesequence'],
updatesequence))
node.attrib[util.nspath_eval('xsi:schemaLocation',
self.parent.context.namespaces)] = '%s %s/cat/csw/3.0/cswGetCapabilities.xsd' % \
(self.parent.context.namespaces['csw30'],
self.parent.config.get('server', 'ogc_schemas_base'))
metadata_main = dict(self.parent.config.items('metadata:main'))
if serviceidentification:
LOGGER.info('Writing section ServiceIdentification')
serviceidentification = etree.SubElement(node, \
util.nspath_eval('ows20:ServiceIdentification',
self.parent.context.namespaces))
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:Title', self.parent.context.namespaces)).text = \
metadata_main.get('identification_title', 'missing')
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:Abstract', self.parent.context.namespaces)).text = \
metadata_main.get('identification_abstract', 'missing')
keywords = etree.SubElement(serviceidentification,
util.nspath_eval('ows20:Keywords', self.parent.context.namespaces))
for k in \
metadata_main.get('identification_keywords').split(','):
etree.SubElement(
keywords, util.nspath_eval('ows20:Keyword',
self.parent.context.namespaces)).text = k
etree.SubElement(keywords,
util.nspath_eval('ows20:Type', self.parent.context.namespaces),
codeSpace='ISOTC211/19115').text = \
metadata_main.get('identification_keywords_type', 'missing')
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:ServiceType', self.parent.context.namespaces),
codeSpace='OGC').text = 'CSW'
for stv in self.parent.context.model['parameters']['version']['values']:
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:ServiceTypeVersion',
self.parent.context.namespaces)).text = stv
if self.parent.profiles is not None:
for prof in self.parent.profiles['loaded'].keys():
prof_name = self.parent.profiles['loaded'][prof].name
prof_val = self.parent.profiles['loaded'][prof].namespaces[prof_name]
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:Profile',
self.parent.context.namespaces)).text = prof_val
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:Fees', self.parent.context.namespaces)).text = \
metadata_main.get('identification_fees', 'missing')
etree.SubElement(serviceidentification,
util.nspath_eval('ows20:AccessConstraints',
self.parent.context.namespaces)).text = \
metadata_main.get('identification_accessconstraints', 'missing')
if serviceprovider:
LOGGER.info('Writing section ServiceProvider')
serviceprovider = etree.SubElement(node,
util.nspath_eval('ows20:ServiceProvider', self.parent.context.namespaces))
etree.SubElement(serviceprovider,
util.nspath_eval('ows20:ProviderName', self.parent.context.namespaces)).text = \
metadata_main.get('provider_name', 'missing')
providersite = etree.SubElement(serviceprovider,
util.nspath_eval('ows20:ProviderSite', self.parent.context.namespaces))
providersite.attrib[util.nspath_eval('xlink:type',
self.parent.context.namespaces)] = 'simple'
providersite.attrib[util.nspath_eval('xlink:href',
self.parent.context.namespaces)] = \
metadata_main.get('provider_url', 'missing')
servicecontact = etree.SubElement(serviceprovider,
util.nspath_eval('ows20:ServiceContact', self.parent.context.namespaces))
etree.SubElement(servicecontact,
util.nspath_eval('ows20:IndividualName',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_name', 'missing')
etree.SubElement(servicecontact,
util.nspath_eval('ows20:PositionName',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_position', 'missing')
contactinfo = etree.SubElement(servicecontact,
util.nspath_eval('ows20:ContactInfo', self.parent.context.namespaces))
phone = etree.SubElement(contactinfo, util.nspath_eval('ows20:Phone',
self.parent.context.namespaces))
etree.SubElement(phone, util.nspath_eval('ows20:Voice',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_phone', 'missing')
etree.SubElement(phone, util.nspath_eval('ows20:Facsimile',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_fax', 'missing')
address = etree.SubElement(contactinfo,
util.nspath_eval('ows20:Address', self.parent.context.namespaces))
etree.SubElement(address,
util.nspath_eval('ows20:DeliveryPoint',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_address', 'missing')
etree.SubElement(address, util.nspath_eval('ows20:City',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_city', 'missing')
etree.SubElement(address,
util.nspath_eval('ows20:AdministrativeArea',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_stateorprovince', 'missing')
etree.SubElement(address,
util.nspath_eval('ows20:PostalCode',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_postalcode', 'missing')
etree.SubElement(address,
util.nspath_eval('ows20:Country', self.parent.context.namespaces)).text = \
metadata_main.get('contact_country', 'missing')
etree.SubElement(address,
util.nspath_eval('ows20:ElectronicMailAddress',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_email', 'missing')
url = etree.SubElement(contactinfo,
util.nspath_eval('ows20:OnlineResource', self.parent.context.namespaces))
url.attrib[util.nspath_eval('xlink:type',
self.parent.context.namespaces)] = 'simple'
url.attrib[util.nspath_eval('xlink:href',
self.parent.context.namespaces)] = \
metadata_main.get('contact_url', 'missing')
etree.SubElement(contactinfo,
util.nspath_eval('ows20:HoursOfService',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_hours', 'missing')
etree.SubElement(contactinfo,
util.nspath_eval('ows20:ContactInstructions',
self.parent.context.namespaces)).text = \
metadata_main.get('contact_instructions', 'missing')
etree.SubElement(servicecontact,
util.nspath_eval('ows20:Role', self.parent.context.namespaces),
codeSpace='ISOTC211/19115').text = \
metadata_main.get('contact_role', 'missing')
if operationsmetadata:
LOGGER.info('Writing section OperationsMetadata')
operationsmetadata = etree.SubElement(node,
util.nspath_eval('ows20:OperationsMetadata',
self.parent.context.namespaces))
for operation in self.parent.context.model['operations_order']:
oper = etree.SubElement(operationsmetadata,
util.nspath_eval('ows20:Operation', self.parent.context.namespaces),
name=operation)
dcp = etree.SubElement(oper, util.nspath_eval('ows20:DCP',
self.parent.context.namespaces))
http = etree.SubElement(dcp, util.nspath_eval('ows20:HTTP',
self.parent.context.namespaces))
if self.parent.context.model['operations'][operation]['methods']['get']:
get = etree.SubElement(http, util.nspath_eval('ows20:Get',
self.parent.context.namespaces))
get.attrib[util.nspath_eval('xlink:type',\
self.parent.context.namespaces)] = 'simple'
get.attrib[util.nspath_eval('xlink:href',\
self.parent.context.namespaces)] = self.parent.config.get('server', 'url')
if self.parent.context.model['operations'][operation]['methods']['post']:
post = etree.SubElement(http, util.nspath_eval('ows20:Post',
self.parent.context.namespaces))
post.attrib[util.nspath_eval('xlink:type',
self.parent.context.namespaces)] = 'simple'
post.attrib[util.nspath_eval('xlink:href',
self.parent.context.namespaces)] = \
self.parent.config.get('server', 'url')
for parameter in \
sorted(self.parent.context.model['operations'][operation]['parameters']):
param = etree.SubElement(oper,
util.nspath_eval('ows20:Parameter',
self.parent.context.namespaces), name=parameter)
param.append(self._write_allowed_values(self.parent.context.model['operations'][operation]['parameters'][parameter]['values']))
if operation == 'GetRecords': # advertise queryables, MaxRecordDefault
for qbl in sorted(self.parent.repository.queryables.keys()):
if qbl not in ['_all', 'SupportedDublinCoreQueryables']:
param = etree.SubElement(oper,
util.nspath_eval('ows20:Constraint',
self.parent.context.namespaces), name=qbl)
param.append(self._write_allowed_values(self.parent.repository.queryables[qbl]))
if self.parent.profiles is not None:
for con in sorted(self.parent.context.model[\
'operations']['GetRecords']['constraints'].keys()):
param = etree.SubElement(oper,
util.nspath_eval('ows20:Constraint',
self.parent.context.namespaces), name=con)
param.append(self._write_allowed_values(self.parent.context.model['operations']['GetRecords']['constraints'][con]['values']))
extra_constraints = {
'OpenSearchDescriptionDocument': ['%s?mode=opensearch&service=CSW&version=3.0.0&request=GetCapabilities' % self.parent.config.get('server', 'url')],
'MaxRecordDefault': self.parent.context.model['constraints']['MaxRecordDefault']['values'],
}
for key in sorted(extra_constraints.keys()):
param = etree.SubElement(oper,
util.nspath_eval('ows20:Constraint',
self.parent.context.namespaces), name=key)
param.append(self._write_allowed_values(extra_constraints[key]))
if 'FederatedCatalogues' in self.parent.context.model['constraints']:
param = etree.SubElement(oper,
util.nspath_eval('ows20:Constraint',
self.parent.context.namespaces), name='FederatedCatalogues')
param.append(self._write_allowed_values(self.parent.context.model['constraints']['FederatedCatalogues']['values']))
for parameter in sorted(self.parent.context.model['parameters'].keys()):
param = etree.SubElement(operationsmetadata,
util.nspath_eval('ows20:Parameter', self.parent.context.namespaces),
name=parameter)
param.append(self._write_allowed_values(self.parent.context.model['parameters'][parameter]['values']))
for qbl in sorted(self.parent.repository.queryables.keys()):
if qbl == 'SupportedDublinCoreQueryables':
param = etree.SubElement(operationsmetadata,
util.nspath_eval('ows20:Constraint',
self.parent.context.namespaces), name='CoreQueryables')
param.append(self._write_allowed_values(self.parent.repository.queryables[qbl]))
for constraint in sorted(self.parent.context.model['constraints'].keys()):
param = etree.SubElement(operationsmetadata,
util.nspath_eval('ows20:Constraint', self.parent.context.namespaces),
name=constraint)
param.append(self._write_allowed_values(self.parent.context.model['constraints'][constraint]['values']))
if self.parent.profiles is not None:
for prof in self.parent.profiles['loaded'].keys():
ecnode = \
self.parent.profiles['loaded'][prof].get_extendedcapabilities()
if ecnode is not None:
operationsmetadata.append(ecnode)
if languages:
LOGGER.info('Writing section ows:Languages')
langs = etree.SubElement(node,
util.nspath_eval('ows20:Languages', self.parent.context.namespaces))
etree.SubElement(langs,
util.nspath_eval('ows20:Language', self.parent.context.namespaces)).text = self.parent.language['639_code']
if not filtercaps:
return node
# always write out Filter_Capabilities
LOGGER.info('Writing section Filter_Capabilities')
fltcaps = etree.SubElement(node,
util.nspath_eval('fes20:Filter_Capabilities', self.parent.context.namespaces))
conformance = etree.SubElement(fltcaps,
util.nspath_eval('fes20:Conformance', self.parent.context.namespaces))
for value in fes2.MODEL['Conformance']['values']:
constraint = etree.SubElement(conformance,
util.nspath_eval('fes20:Constraint', self.parent.context.namespaces),
name=value)
etree.SubElement(constraint,
util.nspath_eval('ows11:NoValues', self.parent.context.namespaces))
etree.SubElement(constraint,
util.nspath_eval('ows11:DefaultValue', self.parent.context.namespaces)).text = 'TRUE'
idcaps = etree.SubElement(fltcaps,
util.nspath_eval('fes20:Id_Capabilities', self.parent.context.namespaces))
for idcap in fes2.MODEL['Ids']['values']:
etree.SubElement(idcaps, util.nspath_eval('fes20:ResourceIdentifier',
self.parent.context.namespaces), name=idcap)
scalarcaps = etree.SubElement(fltcaps,
util.nspath_eval('fes20:Scalar_Capabilities', self.parent.context.namespaces))
etree.SubElement(scalarcaps, util.nspath_eval('fes20:LogicalOperators',
self.parent.context.namespaces))
cmpops = etree.SubElement(scalarcaps,
util.nspath_eval('fes20:ComparisonOperators', self.parent.context.namespaces))
for cmpop in sorted(fes2.MODEL['ComparisonOperators'].keys()):
etree.SubElement(cmpops,
util.nspath_eval('fes20:ComparisonOperator',
self.parent.context.namespaces), name=fes2.MODEL['ComparisonOperators'][cmpop]['opname'])
spatialcaps = etree.SubElement(fltcaps,
util.nspath_eval('fes20:Spatial_Capabilities', self.parent.context.namespaces))
geomops = etree.SubElement(spatialcaps,
util.nspath_eval('fes20:GeometryOperands', self.parent.context.namespaces))
for geomtype in \
fes2.MODEL['GeometryOperands']['values']:
etree.SubElement(geomops,
util.nspath_eval('fes20:GeometryOperand',
self.parent.context.namespaces), name=geomtype)
spatialops = etree.SubElement(spatialcaps,
util.nspath_eval('fes20:SpatialOperators', self.parent.context.namespaces))
for spatial_comparison in \
fes2.MODEL['SpatialOperators']['values']:
etree.SubElement(spatialops,
util.nspath_eval('fes20:SpatialOperator', self.parent.context.namespaces),
name=spatial_comparison)
functions = etree.SubElement(fltcaps,
util.nspath_eval('fes20:Functions', self.parent.context.namespaces))
for fnop in sorted(fes2.MODEL['Functions'].keys()):
fn = etree.SubElement(functions,
util.nspath_eval('fes20:Function', self.parent.context.namespaces),
name=fnop)
etree.SubElement(fn, util.nspath_eval('fes20:Returns',
self.parent.context.namespaces)).text = \
fes2.MODEL['Functions'][fnop]['returns']
return node
def getdomain(self):
''' Handle GetDomain request '''
if ('parametername' not in self.parent.kvp and
'valuereference' not in self.parent.kvp):
return self.exceptionreport('MissingParameterValue',
'parametername', 'Missing value. \
One of valuereference or parametername must be specified')
node = etree.Element(util.nspath_eval('csw30:GetDomainResponse',
self.parent.context.namespaces), nsmap=self.parent.context.namespaces)
node.attrib[util.nspath_eval('xsi:schemaLocation',
self.parent.context.namespaces)] = '%s %s/cat/csw/3.0/cswGetDomain.xsd' % \
(self.parent.context.namespaces['csw30'],
self.parent.config.get('server', 'ogc_schemas_base'))
if 'parametername' in self.parent.kvp:
for pname in self.parent.kvp['parametername'].split(','):
LOGGER.info('Parsing parametername %s', pname)
domainvalue = etree.SubElement(node,
util.nspath_eval('csw30:DomainValues', self.parent.context.namespaces),
type='csw30:Record', resultType='available')
etree.SubElement(domainvalue,
util.nspath_eval('csw30:ParameterName',
self.parent.context.namespaces)).text = pname
try:
operation, parameter = pname.split('.')
except:
return node
if (operation in self.parent.context.model['operations'] and
parameter in self.parent.context.model['operations'][operation]['parameters']):
listofvalues = etree.SubElement(domainvalue,
util.nspath_eval('csw30:ListOfValues', self.parent.context.namespaces))
for val in \
sorted(self.parent.context.model['operations'][operation]\
['parameters'][parameter]['values']):
etree.SubElement(listofvalues,
util.nspath_eval('csw30:Value',
self.parent.context.namespaces)).text = val
if 'valuereference' in self.parent.kvp:
for pname in self.parent.kvp['valuereference'].split(','):
LOGGER.info('Parsing valuereference %s', pname)
if pname.find('/') == 0: # it's an XPath
pname2 = pname
else: # it's a core queryable, map to internal typename model
try:
pname2 = self.parent.repository.queryables['_all'][pname]['dbcol']
except:
pname2 = pname
# decipher typename
dvtype = None
if self.parent.profiles is not None:
for prof in self.parent.profiles['loaded'].keys():
for prefix in self.parent.profiles['loaded'][prof].prefixes:
if pname2.find(prefix) != -1:
dvtype = self.parent.profiles['loaded'][prof].typename
break
if not dvtype:
dvtype = 'csw30:Record'
domainvalue = etree.SubElement(node,
util.nspath_eval('csw30:DomainValues', self.parent.context.namespaces),
type=dvtype, resultType='available')
etree.SubElement(domainvalue,
util.nspath_eval('csw30:ValueReference',
self.parent.context.namespaces)).text = pname
try:
LOGGER.debug(
'Querying repository property %s, typename %s, \
domainquerytype %s',
pname2, dvtype, self.parent.domainquerytype)
results = self.parent.repository.query_domain(
pname2, dvtype, self.parent.domainquerytype, True)
LOGGER.debug('Results: %d', len(results))
if self.parent.domainquerytype == 'range':
rangeofvalues = etree.SubElement(domainvalue,
util.nspath_eval('csw30:RangeOfValues',
self.parent.context.namespaces))
etree.SubElement(rangeofvalues,
util.nspath_eval('csw30:MinValue',
self.parent.context.namespaces)).text = results[0][0]
etree.SubElement(rangeofvalues,
util.nspath_eval('csw30:MaxValue',
self.parent.context.namespaces)).text = results[0][1]
else:
listofvalues = etree.SubElement(domainvalue,
util.nspath_eval('csw30:ListOfValues',
self.parent.context.namespaces))
for result in results:
LOGGER.debug(str(result))
if (result is not None and
result[0] is not None): # drop null values
etree.SubElement(listofvalues,
util.nspath_eval('csw30:Value',
self.parent.context.namespaces),
count=str(result[1])).text = result[0]
except Exception as err:
# here we fail silently back to the client because
# CSW tells us to
LOGGER.exception('No results for propertyname')
return node
    def getrecords(self):
        ''' Handle GetRecords request

        Validates and normalizes the KVP request (element set, output schema
        and format, typenames, paging, constraint, sort), queries the
        repository, serializes matching records into a
        csw30:GetRecordsResponse, and optionally performs distributed search
        against federated catalogues. Returns the response Element, an
        exception report Element on validation failure, or nothing when a
        responsehandler is configured (the serialized response is handed to
        the handler instead).
        '''
        timestamp = util.get_today_and_now()

        # ElementSetName / ElementName are mutually exclusive; a GET request
        # with neither falls back to the 'summary' element set
        if ('elementsetname' not in self.parent.kvp and
                'elementname' not in self.parent.kvp):
            if self.parent.requesttype == 'GET':
                LOGGER.debug(self.parent.requesttype)
                self.parent.kvp['elementsetname'] = 'summary'
            else:
                # mutually exclusive required
                return self.exceptionreport('MissingParameterValue',
                    'elementsetname',
                    'Missing one of ElementSetName or ElementName parameter(s)')

        if 'elementsetname' in self.parent.kvp and 'elementname' in self.parent.kvp and self.parent.kvp['elementname']:
            # mutually exclusive required
            return self.exceptionreport('NoApplicableCode',
                'elementsetname',
                'Only ONE of ElementSetName or ElementName parameter(s) is permitted')

        if 'elementsetname' not in self.parent.kvp:
            self.parent.kvp['elementsetname'] = 'summary'

        # default output schema is CSW 3.0
        if 'outputschema' not in self.parent.kvp:
            self.parent.kvp['outputschema'] = self.parent.context.namespaces['csw30']

        LOGGER.debug(self.parent.context.model['operations']['GetRecords']['parameters']['outputSchema']['values'])

        if (self.parent.kvp['outputschema'] not in self.parent.context.model['operations']
                ['GetRecords']['parameters']['outputSchema']['values']):
            return self.exceptionreport('InvalidParameterValue',
                'outputschema', 'Invalid outputSchema parameter value: %s' %
                self.parent.kvp['outputschema'])

        if 'outputformat' not in self.parent.kvp:
            self.parent.kvp['outputformat'] = 'application/xml'

        # negotiate output format against the HTTP Accept header (if sent)
        if 'HTTP_ACCEPT' in self.parent.environ:
            LOGGER.debug('Detected HTTP Accept header: %s', self.parent.environ['HTTP_ACCEPT'])
            formats_match = False
            if 'outputformat' in self.parent.kvp:
                LOGGER.debug(self.parent.kvp['outputformat'])
                for ofmt in self.parent.environ['HTTP_ACCEPT'].split(','):
                    LOGGER.info('Comparing %s and %s', ofmt, self.parent.kvp['outputformat'])
                    # loose match on the major media type (e.g. 'application')
                    if ofmt.split('/')[0] in self.parent.kvp['outputformat']:
                        LOGGER.debug('Found output match')
                        formats_match = True
                # '*/*' always passes
                if not formats_match and self.parent.environ['HTTP_ACCEPT'] != '*/*':
                    return self.exceptionreport('InvalidParameterValue',
                        'outputformat', 'HTTP Accept header (%s) and outputformat (%s) must agree' %
                        (self.parent.environ['HTTP_ACCEPT'], self.parent.kvp['outputformat']))
            else:
                # no explicit outputformat: take the first acceptable value
                for ofmt in self.parent.environ['HTTP_ACCEPT'].split(','):
                    if ofmt in self.parent.context.model['operations']['GetRecords']['parameters']['outputFormat']['values']:
                        self.parent.kvp['outputformat'] = ofmt
                        break

        if (self.parent.kvp['outputformat'] not in self.parent.context.model['operations']
                ['GetRecords']['parameters']['outputFormat']['values']):
            return self.exceptionreport('InvalidParameterValue',
                'outputformat', 'Invalid outputFormat parameter value: %s' %
                self.parent.kvp['outputformat'])

        if 'outputformat' in self.parent.kvp:
            LOGGER.info('Setting content type')
            self.parent.contenttype = self.parent.kvp['outputformat']
            # Atom output implies the OpenSearch response mode
            if self.parent.kvp['outputformat'] == 'application/atom+xml':
                self.parent.kvp['outputschema'] = self.parent.context.namespaces['atom']
                self.parent.mode = 'opensearch'

        # ElementSetName only needs to be valid when no ElementName is given
        if (('elementname' not in self.parent.kvp or
                len(self.parent.kvp['elementname']) == 0) and
                self.parent.kvp['elementsetname'] not in
                self.parent.context.model['operations']['GetRecords']['parameters']
                ['ElementSetName']['values']):
            return self.exceptionreport('InvalidParameterValue',
                'elementsetname', 'Invalid ElementSetName parameter value: %s' %
                self.parent.kvp['elementsetname'])

        if 'typenames' not in self.parent.kvp:
            return self.exceptionreport('MissingParameterValue',
                'typenames', 'Missing typenames parameter')

        if ('typenames' in self.parent.kvp and
                self.parent.requesttype == 'GET'):  # passed via GET
            #self.parent.kvp['typenames'] = self.parent.kvp['typenames'].split(',')
            # normalize bare 'Record' to the qualified 'csw:Record'
            self.parent.kvp['typenames'] = ['csw:Record' if x=='Record' else x for x in self.parent.kvp['typenames'].split(',')]

        if 'namespace' in self.parent.kvp:
            # rewrite prefixed names against the client-supplied bindings
            LOGGER.info('resolving KVP namespace bindings')
            LOGGER.debug(self.parent.kvp['typenames'])
            self.parent.kvp['typenames'] = self.resolve_nsmap(self.parent.kvp['typenames'])
            if 'elementname' in self.parent.kvp:
                LOGGER.debug(self.parent.kvp['elementname'])
                self.parent.kvp['elementname'] = self.resolve_nsmap(self.parent.kvp['elementname'].split(','))

        if 'typenames' in self.parent.kvp:
            for tname in self.parent.kvp['typenames']:
                #if tname == 'Record':
                #    tname = 'csw:Record'
                if (tname not in self.parent.context.model['operations']['GetRecords']
                        ['parameters']['typeNames']['values']):
                    return self.exceptionreport('InvalidParameterValue',
                        'typenames', 'Invalid typeNames parameter value: %s' %
                        tname)

        # check elementname's
        if 'elementname' in self.parent.kvp:
            for ename in self.parent.kvp['elementname']:
                if ename not in self.parent.repository.queryables['_all']:
                    return self.exceptionreport('InvalidParameterValue',
                        'elementname', 'Invalid ElementName parameter value: %s' %
                        ename)

        # resolve maxrecords: client value, capped by server.maxrecords when
        # configured; spec default of 10 otherwise
        maxrecords_cfg = -1  # not set in config server.maxrecords
        if self.parent.config.has_option('server', 'maxrecords'):
            maxrecords_cfg = int(self.parent.config.get('server', 'maxrecords'))

        if 'maxrecords' in self.parent.kvp and self.parent.kvp['maxrecords'] == 'unlimited':
            LOGGER.debug('Detected maxrecords=unlimited')
            self.parent.kvp.pop('maxrecords')

        if 'maxrecords' not in self.parent.kvp:  # not specified by client
            if maxrecords_cfg > -1:  # specified in config
                self.parent.kvp['maxrecords'] = maxrecords_cfg
            else:  # spec default
                self.parent.kvp['maxrecords'] = 10
        else:  # specified by client
            if self.parent.kvp['maxrecords'] == '':
                self.parent.kvp['maxrecords'] = 10
            if maxrecords_cfg > -1:  # set in config
                if int(self.parent.kvp['maxrecords']) > maxrecords_cfg:
                    self.parent.kvp['maxrecords'] = maxrecords_cfg

        # OpenSearch Geo/Time KVP parameters translate into an OGC Filter
        if any(x in opensearch.QUERY_PARAMETERS for x in self.parent.kvp):
            LOGGER.debug('OpenSearch Geo/Time parameters detected.')
            self.parent.kvp['constraintlanguage'] = 'FILTER'
            try:
                tmp_filter = opensearch.kvp2filterxml(self.parent.kvp, self.parent.context,
                    self.parent.profiles, fes_version='2.0')
            except Exception as err:
                return self.exceptionreport('InvalidParameterValue', 'bbox', str(err))

            if tmp_filter != "":
                self.parent.kvp['constraint'] = tmp_filter
                LOGGER.debug('OpenSearch Geo/Time parameters to Filter: %s.', self.parent.kvp['constraint'])

        # on GET, parse the textual constraint into the internal dict form
        # (POST requests arrive pre-parsed)
        if self.parent.requesttype == 'GET':
            if 'constraint' in self.parent.kvp:
                # GET request
                LOGGER.debug('csw:Constraint passed over HTTP GET.')
                if 'constraintlanguage' not in self.parent.kvp:
                    return self.exceptionreport('MissingParameterValue',
                        'constraintlanguage',
                        'constraintlanguage required when constraint specified')
                if (self.parent.kvp['constraintlanguage'] not in
                        self.parent.context.model['operations']['GetRecords']['parameters']
                        ['CONSTRAINTLANGUAGE']['values']):
                    return self.exceptionreport('InvalidParameterValue',
                        'constraintlanguage', 'Invalid constraintlanguage: %s'
                        % self.parent.kvp['constraintlanguage'])
                if self.parent.kvp['constraintlanguage'] == 'CQL_TEXT':
                    tmp = self.parent.kvp['constraint']
                    try:
                        LOGGER.info('Transforming CQL into fes1')
                        LOGGER.debug('CQL: %s', tmp)
                        # replace raw text with a parsed constraint dict:
                        # type / where / values / _dict
                        self.parent.kvp['constraint'] = {}
                        self.parent.kvp['constraint']['type'] = 'filter'
                        cql = cql2fes(tmp, self.parent.context.namespaces, fes_version='1.0')
                        self.parent.kvp['constraint']['where'], self.parent.kvp['constraint']['values'] = fes1.parse(cql,
                            self.parent.repository.queryables['_all'], self.parent.repository.dbtype,
                            self.parent.context.namespaces, self.parent.orm, self.parent.language['text'], self.parent.repository.fts)
                        self.parent.kvp['constraint']['_dict'] = xml2dict(etree.tostring(cql), self.parent.context.namespaces)
                    except Exception as err:
                        LOGGER.exception('Invalid CQL query %s', tmp)
                        return self.exceptionreport('InvalidParameterValue',
                            'constraint', 'Invalid Filter syntax')
                elif self.parent.kvp['constraintlanguage'] == 'FILTER':
                    # validate filter XML
                    try:
                        # validate against the bundled FES 2.0 schema before parsing
                        schema = os.path.join(self.parent.config.get('server', 'home'),
                            'core', 'schemas', 'ogc', 'filter', '2.0', '_wrapper.xsd')
                        LOGGER.info('Validating Filter %s.', self.parent.kvp['constraint'])
                        schema = etree.XMLSchema(file=schema)
                        parser = etree.XMLParser(schema=schema, resolve_entities=False)
                        doc = etree.fromstring(self.parent.kvp['constraint'], parser)
                        LOGGER.debug('Filter is valid XML.')
                        self.parent.kvp['constraint'] = {}
                        self.parent.kvp['constraint']['type'] = 'filter'
                        self.parent.kvp['constraint']['where'], self.parent.kvp['constraint']['values'] = \
                            fes2.parse(doc,
                                self.parent.repository.queryables['_all'],
                                self.parent.repository.dbtype,
                                self.parent.context.namespaces, self.parent.orm, self.parent.language['text'], self.parent.repository.fts)
                        self.parent.kvp['constraint']['_dict'] = xml2dict(etree.tostring(doc), self.parent.context.namespaces)
                    except Exception as err:
                        errortext = \
                            'Exception: document not valid.\nError: %s' % str(err)
                        LOGGER.exception(errortext)
                        return self.exceptionreport('InvalidParameterValue',
                            'bbox', 'Invalid Filter query: %s' % errortext)
            else:
                self.parent.kvp['constraint'] = {}

        # parse SortBy ('propertyname:A' or 'propertyname:D') on GET requests
        if 'sortby' not in self.parent.kvp:
            self.parent.kvp['sortby'] = None
        elif 'sortby' in self.parent.kvp and self.parent.requesttype == 'GET':
            LOGGER.debug('Sorted query specified')
            tmp = self.parent.kvp['sortby']
            self.parent.kvp['sortby'] = {}

            try:
                name, order = tmp.rsplit(':', 1)
            except:
                return self.exceptionreport('InvalidParameterValue',
                    'sortby', 'Invalid SortBy value: must be in the format\
 propertyname:A or propertyname:D')

            try:
                self.parent.kvp['sortby']['propertyname'] = \
                    self.parent.repository.queryables['_all'][name]['dbcol']
                if name.find('BoundingBox') != -1 or name.find('Envelope') != -1:
                    # it's a spatial sort
                    self.parent.kvp['sortby']['spatial'] = True
            except Exception as err:
                return self.exceptionreport('InvalidParameterValue',
                    'sortby', 'Invalid SortBy propertyname: %s' % name)

            if order not in ['A', 'D']:
                return self.exceptionreport('InvalidParameterValue',
                    'sortby', 'Invalid SortBy value: sort order must be "A" or "D"')

            if order == 'D':
                self.parent.kvp['sortby']['order'] = 'DESC'
            else:
                self.parent.kvp['sortby']['order'] = 'ASC'

        # startposition is 1-based per the spec
        if 'startposition' not in self.parent.kvp or not self.parent.kvp['startposition']:
            self.parent.kvp['startposition'] = 1

        if 'recordids' in self.parent.kvp and self.parent.kvp['recordids'] != '':
            # query repository
            LOGGER.info('Querying repository with RECORD ids: %s', self.parent.kvp['recordids'])
            results = self.parent.repository.query_ids(self.parent.kvp['recordids'].split(','))
            matched = str(len(results))
            if len(results) == 0:
                return self.exceptionreport('NotFound', 'recordids',
                    'No records found for \'%s\'' % self.parent.kvp['recordids'])
        else:
            # query repository
            LOGGER.info('Querying repository with constraint: %s,\
 sortby: %s, typenames: %s, maxrecords: %s, startposition: %s.',
                self.parent.kvp['constraint'], self.parent.kvp['sortby'], self.parent.kvp['typenames'],
                self.parent.kvp['maxrecords'], self.parent.kvp['startposition'])

            try:
                # repository startposition is 0-based, hence the -1
                matched, results = self.parent.repository.query(
                    constraint=self.parent.kvp['constraint'],
                    sortby=self.parent.kvp['sortby'], typenames=self.parent.kvp['typenames'],
                    maxrecords=self.parent.kvp['maxrecords'],
                    startposition=int(self.parent.kvp['startposition'])-1)
            except Exception as err:
                LOGGER.exception('Invalid query syntax. Query: %s', self.parent.kvp['constraint'])
                LOGGER.exception('Invalid query syntax. Result: %s', err)
                return self.exceptionreport('InvalidParameterValue', 'constraint',
                    'Invalid query syntax')

        # compute paging metadata; nextrecord == '0' means set exhausted
        if int(matched) == 0:
            returned = nextrecord = '0'
        elif int(self.parent.kvp['maxrecords']) == 0:
            returned = nextrecord = '0'
        elif int(matched) < int(self.parent.kvp['startposition']):
            returned = nextrecord = '0'
        elif int(matched) <= int(self.parent.kvp['startposition']) + int(self.parent.kvp['maxrecords']) - 1:
            # final page: everything from startposition to the end
            returned = str(int(matched) - int(self.parent.kvp['startposition']) + 1)
            nextrecord = '0'
        else:
            returned = str(self.parent.kvp['maxrecords'])
            nextrecord = str(int(self.parent.kvp['startposition']) + int(self.parent.kvp['maxrecords']))
        LOGGER.debug('Results: matched: %s, returned: %s, next: %s',
            matched, returned, nextrecord)

        # build the GetRecordsResponse envelope
        node = etree.Element(util.nspath_eval('csw30:GetRecordsResponse',
            self.parent.context.namespaces),
            nsmap=self.parent.context.namespaces, version='3.0.0')

        node.attrib[util.nspath_eval('xsi:schemaLocation',
            self.parent.context.namespaces)] = \
            '%s %s/cat/csw/3.0/cswGetRecords.xsd' % \
            (self.parent.context.namespaces['csw30'], self.parent.config.get('server', 'ogc_schemas_base'))

        if 'requestid' in self.parent.kvp and self.parent.kvp['requestid'] is not None:
            etree.SubElement(node, util.nspath_eval('csw:RequestId',
                self.parent.context.namespaces)).text = self.parent.kvp['requestid']

        etree.SubElement(node, util.nspath_eval('csw30:SearchStatus',
            self.parent.context.namespaces), timestamp=timestamp)

        #if 'where' not in self.parent.kvp['constraint'] and \
        #self.parent.kvp['resulttype'] is None:
        #    returned = '0'

        searchresults = etree.SubElement(node,
            util.nspath_eval('csw30:SearchResults', self.parent.context.namespaces),
            numberOfRecordsMatched=matched, numberOfRecordsReturned=returned,
            nextRecord=nextrecord, recordSchema=self.parent.kvp['outputschema'],
            expires=timestamp, status=get_resultset_status(matched, nextrecord))

        if self.parent.kvp['elementsetname'] is not None:
            searchresults.attrib['elementSet'] = self.parent.kvp['elementsetname']

        #if 'where' not in self.parent.kvp['constraint'] \
        #and self.parent.kvp['resulttype'] is None:
        #    LOGGER.debug('Empty result set returned')
        #    return node

        if results is not None:
            # NOTE(review): when fewer than maxrecords results come back,
            # max1 ignores startposition — affects the log message only
            if len(results) < int(self.parent.kvp['maxrecords']):
                max1 = len(results)
            else:
                max1 = int(self.parent.kvp['startposition']) + (int(self.parent.kvp['maxrecords'])-1)
            LOGGER.info('Presenting records %s - %s',
                self.parent.kvp['startposition'], max1)

            for res in results:
                node_ = None
                if self.parent.xslts:
                    # user-configured XSLT takes precedence over serializers
                    try:
                        node_ = self.parent._render_xslt(res)
                    except Exception as err:
                        self.parent.response = self.exceptionreport(
                            'NoApplicableCode', 'service',
                            'XSLT transformation failed. Check server logs for errors')
                        return self.parent.response
                if node_ is not None:
                    searchresults.append(node_)
                else:
                    try:
                        if (self.parent.kvp['outputschema'] ==
                                'http://www.opengis.net/cat/csw/3.0' and
                                ('csw:Record' in self.parent.kvp['typenames'] or
                                 'csw30:Record' in self.parent.kvp['typenames'])):
                            # serialize csw:Record inline
                            searchresults.append(self._write_record(
                                res, self.parent.repository.queryables['_all']))
                        elif (self.parent.kvp['outputschema'] ==
                                'http://www.opengis.net/cat/csw/3.0' and
                                'csw:Record' not in self.parent.kvp['typenames']):
                            # serialize into csw:Record model
                            for prof in self.parent.profiles['loaded']:
                                # find source typename
                                if self.parent.profiles['loaded'][prof].typename in \
                                        self.parent.kvp['typenames']:
                                    typename = self.parent.profiles['loaded'][prof].typename
                                    break
                            util.transform_mappings(
                                self.parent.repository.queryables['_all'],
                                self.parent.context.model['typenames'][typename][
                                    'mappings']['csw:Record']
                            )
                            searchresults.append(self._write_record(
                                res, self.parent.repository.queryables['_all']))
                        elif self.parent.kvp['outputschema'] in self.parent.outputschemas:  # use outputschema serializer
                            searchresults.append(self.parent.outputschemas[self.parent.kvp['outputschema']].write_record(res, self.parent.kvp['elementsetname'], self.parent.context, self.parent.config.get('server', 'url')))
                        else:  # use profile serializer
                            searchresults.append(
                                self.parent.profiles['loaded'][self.parent.kvp['outputschema']].\
                                write_record(res, self.parent.kvp['elementsetname'],
                                    self.parent.kvp['outputschema'],
                                    self.parent.repository.queryables['_all']))
                    except Exception as err:
                        self.parent.response = self.exceptionreport(
                            'NoApplicableCode', 'service',
                            'Record serialization failed: %s' % str(err))
                        return self.parent.response

        # distributed search against configured federated catalogues
        if (self.parent.config.has_option('server', 'federatedcatalogues') and
                'distributedsearch' in self.parent.kvp and 'hopcount' in self.parent.kvp and
                self.parent.kvp['distributedsearch'] and int(self.parent.kvp['hopcount']) > 0):
            # do distributed search
            # if all([self.parent.config.has_option('server', 'federatedcatalogues'),
            #         'distributedsearch' in self.parent.kvp,
            #         self.parent.kvp['distributedsearch'],
            #         'hopcount' in self.parent.kvp,
            #         int(self.parent.kvp['hopcount']) > 0]):  # do distributed search
            LOGGER.debug('DistributedSearch specified (hopCount: %s)',
                self.parent.kvp['hopcount'])

            from owslib.csw import CatalogueServiceWeb
            from owslib.ows import ExceptionReport
            for fedcat in \
                    self.parent.config.get('server', 'federatedcatalogues').split(','):
                LOGGER.info('Performing distributed search on federated \
catalogue: %s', fedcat)
                # a failing remote catalogue is recorded as an XML comment
                # in the response rather than failing the whole request
                try:
                    start_time = time()
                    remotecsw = CatalogueServiceWeb(fedcat, version='3.0.0', skip_caps=True)
                    if str(self.parent.request).startswith('http'):
                        # strip the local base URL and opensearch marker
                        # before forwarding the KVP request
                        self.parent.request = self.parent.request.split('?')[-1]
                        self.parent.request = self.parent.request.replace('mode=opensearch', '')
                    remotecsw.getrecords(xml=self.parent.request,
                        esn=self.parent.kvp['elementsetname'],
                        outputschema=self.parent.kvp['outputschema'])

                    fsr = etree.SubElement(searchresults, util.nspath_eval(
                        'csw30:FederatedSearchResult',
                        self.parent.context.namespaces),
                        catalogueURL=fedcat)

                    msg = 'Distributed search results from catalogue %s: %s.' % (fedcat, remotecsw.results)
                    LOGGER.debug(msg)
                    fsr.append(etree.Comment(msg))

                    search_result = etree.SubElement(fsr, util.nspath_eval(
                        'csw30:searchResult', self.parent.context.namespaces),
                        recordSchema=self.parent.kvp['outputschema'],
                        elementSetName=self.parent.kvp['elementsetname'],
                        numberOfRecordsMatched=str(remotecsw.results['matches']),
                        numberOfRecordsReturned=str(remotecsw.results['returned']),
                        nextRecord=str(remotecsw.results['nextrecord']),
                        elapsedTime=str(get_elapsed_time(start_time, time())),
                        status=get_resultset_status(
                            remotecsw.results['matches'],
                            remotecsw.results['nextrecord']))

                    for result in remotecsw.records.values():
                        search_result.append(etree.fromstring(result.xml, self.parent.context.parser))
                except ExceptionReport as err:
                    error_string = 'remote CSW %s returned exception: ' % fedcat
                    searchresults.append(etree.Comment(
                        ' %s\n\n%s ' % (error_string, err)))
                    LOGGER.exception(error_string)
                except Exception as err:
                    error_string = 'remote CSW %s returned error: ' % fedcat
                    searchresults.append(etree.Comment(
                        ' %s\n\n%s ' % (error_string, err)))
                    LOGGER.exception(error_string)

        searchresults.attrib['elapsedTime'] = str(get_elapsed_time(self.parent.process_time_start, time()))

        if 'responsehandler' in self.parent.kvp:  # process the handler
            self.parent._process_responsehandler(etree.tostring(node,
                pretty_print=self.parent.pretty_print))
        else:
            return node
    def getrecordbyid(self, raw=False):
        ''' Handle GetRecordById request

        :param raw: when True (GetRepositoryItem path), return the stored
                    XML of the first match verbatim, or None if nothing
                    matched, instead of serializing the record
        :returns: an lxml Element (response or serialized record), None for
                  an empty raw lookup, or an exception report Element on
                  validation/lookup failure
        '''
        # 'id' is mandatory and must be non-empty
        if 'id' not in self.parent.kvp:
            return self.exceptionreport('MissingParameterValue', 'id',
                'Missing id parameter')
        if len(self.parent.kvp['id']) < 1:
            return self.exceptionreport('InvalidParameterValue', 'id',
                'Invalid id parameter')

        # default output schema is CSW 3.0
        if 'outputschema' not in self.parent.kvp:
            self.parent.kvp['outputschema'] = self.parent.context.namespaces['csw30']

        # negotiate output format against the HTTP Accept header (if sent)
        if 'HTTP_ACCEPT' in self.parent.environ:
            LOGGER.debug('Detected HTTP Accept header: %s', self.parent.environ['HTTP_ACCEPT'])
            formats_match = False
            if 'outputformat' in self.parent.kvp:
                LOGGER.debug(self.parent.kvp['outputformat'])
                for ofmt in self.parent.environ['HTTP_ACCEPT'].split(','):
                    LOGGER.info('Comparing %s and %s', ofmt, self.parent.kvp['outputformat'])
                    # loose match on the major media type (e.g. 'application')
                    if ofmt.split('/')[0] in self.parent.kvp['outputformat']:
                        LOGGER.debug('FOUND OUTPUT MATCH')
                        formats_match = True
                # NOTE(review): unlike getrecords, there is no '*/*'
                # wildcard exemption here — confirm whether intended
                if not formats_match:
                    return self.exceptionreport('InvalidParameterValue',
                        'outputformat', 'HTTP Accept header (%s) and outputformat (%s) must agree' %
                        (self.parent.environ['HTTP_ACCEPT'], self.parent.kvp['outputformat']))
            else:
                # no explicit outputformat: take the first acceptable value
                for ofmt in self.parent.environ['HTTP_ACCEPT'].split(','):
                    if ofmt in self.parent.context.model['operations']['GetRecords']['parameters']['outputFormat']['values']:
                        self.parent.kvp['outputformat'] = ofmt
                        break

        if ('outputformat' in self.parent.kvp and
                self.parent.kvp['outputformat'] not in
                self.parent.context.model['operations']['GetRecordById']['parameters']
                ['outputFormat']['values']):
            return self.exceptionreport('InvalidParameterValue',
                'outputformat', 'Invalid outputformat parameter %s' %
                self.parent.kvp['outputformat'])

        if ('outputschema' in self.parent.kvp and self.parent.kvp['outputschema'] not in
                self.parent.context.model['operations']['GetRecordById']['parameters']
                ['outputSchema']['values']):
            return self.exceptionreport('InvalidParameterValue',
                'outputschema', 'Invalid outputschema parameter %s' %
                self.parent.kvp['outputschema'])

        if 'outputformat' in self.parent.kvp:
            self.parent.contenttype = self.parent.kvp['outputformat']
            # Atom output implies the OpenSearch response mode
            if self.parent.kvp['outputformat'] == 'application/atom+xml':
                self.parent.kvp['outputschema'] = self.parent.context.namespaces['atom']
                self.parent.mode = 'opensearch'

        if 'elementsetname' not in self.parent.kvp:
            self.parent.kvp['elementsetname'] = 'summary'
        else:
            if (self.parent.kvp['elementsetname'] not in
                    self.parent.context.model['operations']['GetRecordById']['parameters']
                    ['ElementSetName']['values']):
                return self.exceptionreport('InvalidParameterValue',
                    'elementsetname', 'Invalid elementsetname parameter %s' %
                    self.parent.kvp['elementsetname'])

        # query repository
        LOGGER.info('Querying repository with ids: %s', self.parent.kvp['id'])
        results = self.parent.repository.query_ids([self.parent.kvp['id']])

        if raw:  # GetRepositoryItem request
            LOGGER.debug('GetRepositoryItem request.')
            if len(results) > 0:
                # return the stored record XML untouched
                return etree.fromstring(util.getqattr(results[0],
                    self.parent.context.md_core_model['mappings']['pycsw:XML']), self.parent.context.parser)

        for result in results:
            node_ = None
            if self.parent.xslts:
                # user-configured XSLT takes precedence over serializers
                try:
                    node_ = self.parent._render_xslt(result)
                except Exception as err:
                    self.parent.response = self.exceptionreport(
                        'NoApplicableCode', 'service',
                        'XSLT transformation failed. Check server logs for errors %s' % str(err))
                    return self.parent.response
            if node_ is not None:
                node = node_
            else:
                if (util.getqattr(result,
                        self.parent.context.md_core_model['mappings']['pycsw:Typename']) == 'csw:Record'
                        and self.parent.kvp['outputschema'] ==
                        'http://www.opengis.net/cat/csw/3.0'):
                    # serialize record inline
                    node = self._write_record(
                        result, self.parent.repository.queryables['_all'])
                elif (self.parent.kvp['outputschema'] ==
                        'http://www.opengis.net/cat/csw/3.0'):
                    # serialize into csw:Record model
                    typename = None
                    for prof in self.parent.profiles['loaded']:  # find source typename
                        if self.parent.profiles['loaded'][prof].typename in \
                                [util.getqattr(result, self.parent.context.md_core_model['mappings']['pycsw:Typename'])]:
                            typename = self.parent.profiles['loaded'][prof].typename
                            break
                    if typename is not None:
                        # remap profile queryables onto the csw:Record model
                        util.transform_mappings(
                            self.parent.repository.queryables['_all'],
                            self.parent.context.model['typenames'][typename][
                                'mappings']['csw:Record']
                        )
                    node = self._write_record( result, self.parent.repository.queryables['_all'])
                elif self.parent.kvp['outputschema'] in self.parent.outputschemas:  # use outputschema serializer
                    node = self.parent.outputschemas[self.parent.kvp['outputschema']].write_record(result, self.parent.kvp['elementsetname'], self.parent.context, self.parent.config.get('server', 'url'))
                else:  # it's a profile output
                    node = self.parent.profiles['loaded'][self.parent.kvp['outputschema']].write_record(
                        result, self.parent.kvp['elementsetname'],
                        self.parent.kvp['outputschema'], self.parent.repository.queryables['_all'])

        if raw and len(results) == 0:
            # GetRepositoryItem: the caller converts None into NotFound
            return None

        if len(results) == 0:
            return self.exceptionreport('NotFound', 'id',
                'No repository item found for \'%s\'' % self.parent.kvp['id'])

        return node
def getrepositoryitem(self):
''' Handle GetRepositoryItem request '''
# similar to GetRecordById without csw:* wrapping
node = self.parent.getrecordbyid(raw=True)
if node is None:
return self.exceptionreport('NotFound', 'id',
'No repository item found for \'%s\'' % self.parent.kvp['id'])
else:
return node
    def transaction(self):
        ''' Handle Transaction request

        Applies the parsed operations in ``self.parent.kvp['transactions']``
        (insert / update / delete) to the repository and returns a
        csw30:TransactionResponse element, or an ExceptionReport node on
        failure.
        '''

        # transactions require the manager interface to be enabled
        try:
            self.parent._test_manager()
        except Exception as err:
            return self.exceptionreport('NoApplicableCode', 'transaction',
            str(err))

        # counters reported in csw30:TransactionSummary
        inserted = 0
        updated = 0
        deleted = 0

        # identifier/title pairs, echoed when verboseresponse is requested
        insertresults = []

        LOGGER.debug('Transaction list: %s', self.parent.kvp['transactions'])

        for ttype in self.parent.kvp['transactions']:
            if ttype['type'] == 'insert':
                # parse the inline XML payload into a record object
                try:
                    record = metadata.parse_record(self.parent.context,
                    ttype['xml'], self.parent.repository)[0]
                except Exception as err:
                    LOGGER.exception('Transaction (insert) failed')
                    return self.exceptionreport('NoApplicableCode', 'insert',
                    'Transaction (insert) failed: record parsing failed: %s' \
                    % str(err))

                LOGGER.debug('Transaction operation: %s', record)

                # a record without an identifier cannot be stored
                if not hasattr(record,
                self.parent.context.md_core_model['mappings']['pycsw:Identifier']):
                    return self.exceptionreport('NoApplicableCode',
                    'insert', 'Record requires an identifier')

                # insert new record
                try:
                    self.parent.repository.insert(record, 'local',
                    util.get_today_and_now())
                    inserted += 1
                    insertresults.append(
                    {'identifier': getattr(record,
                    self.parent.context.md_core_model['mappings']['pycsw:Identifier']),
                    'title': getattr(record,
                    self.parent.context.md_core_model['mappings']['pycsw:Title'])})
                except Exception as err:
                    LOGGER.exception('Transaction (insert) failed')
                    return self.exceptionreport('NoApplicableCode',
                    'insert', 'Transaction (insert) failed: %s.' % str(err))

            elif ttype['type'] == 'update':
                if 'constraint' not in ttype:
                    # update full existing resource in repository
                    try:
                        record = metadata.parse_record(self.parent.context,
                        ttype['xml'], self.parent.repository)[0]
                        identifier = getattr(record,
                        self.parent.context.md_core_model['mappings']['pycsw:Identifier'])
                    except Exception as err:
                        return self.exceptionreport('NoApplicableCode', 'insert',
                        'Transaction (update) failed: record parsing failed: %s' \
                        % str(err))

                    # query repository to see if record already exists
                    LOGGER.info('checking if record exists (%s)', identifier)
                    results = self.parent.repository.query_ids(ids=[identifier])

                    if len(results) == 0:
                        # unknown identifier: silently skipped (not counted)
                        LOGGER.debug('id %s does not exist in repository', identifier)
                    else:  # existing record, it's an update
                        try:
                            self.parent.repository.update(record)
                            updated += 1
                        except Exception as err:
                            return self.exceptionreport('NoApplicableCode',
                            'update',
                            'Transaction (update) failed: %s.' % str(err))
                else:  # update by record property and constraint
                    # get / set XPath for property names
                    for rp in ttype['recordproperty']:
                        if rp['name'] not in self.parent.repository.queryables['_all']:
                            # is it an XPath?
                            if rp['name'].find('/') != -1:
                                # scan outputschemas; if match, bind
                                for osch in self.parent.outputschemas.values():
                                    for key, value in osch.XPATH_MAPPINGS.items():
                                        if value == rp['name']:  # match
                                            rp['rp'] = {'xpath': value, 'name': key}
                                            rp['rp']['dbcol'] = self.parent.repository.queryables['_all'][key]
                                            break
                            else:
                                # neither a known queryable nor an XPath
                                return self.exceptionreport('NoApplicableCode',
                                'update', 'Transaction (update) failed: invalid property2: %s.' % str(rp['name']))
                        else:
                            # direct queryable binding
                            rp['rp'] = \
                            self.parent.repository.queryables['_all'][rp['name']]

                    LOGGER.debug('Record Properties: %s', ttype['recordproperty'])
                    try:
                        # repository returns the number of rows affected
                        updated += self.parent.repository.update(record=None,
                        recprops=ttype['recordproperty'],
                        constraint=ttype['constraint'])
                    except Exception as err:
                        LOGGER.exception('Transaction (update) failed')
                        return self.exceptionreport('NoApplicableCode',
                        'update',
                        'Transaction (update) failed: %s.' % str(err))

            elif ttype['type'] == 'delete':
                # delete by constraint; repository returns affected count
                deleted += self.parent.repository.delete(ttype['constraint'])

        # assemble the csw30:TransactionResponse envelope
        node = etree.Element(util.nspath_eval('csw30:TransactionResponse',
        self.parent.context.namespaces), nsmap=self.parent.context.namespaces, version='3.0.0')

        node.attrib[util.nspath_eval('xsi:schemaLocation',
        self.parent.context.namespaces)] = '%s %s/csw/3.0/cswTransaction.xsd' % \
        (self.parent.context.namespaces['csw30'], self.parent.config.get('server', 'ogc_schemas_base'))

        node.append(
        self._write_transactionsummary(
        inserted=inserted, updated=updated, deleted=deleted))

        if (len(insertresults) > 0 and self.parent.kvp['verboseresponse']):
            # show insert result identifiers
            node.append(self._write_verboseresponse(insertresults))

        return node
    def harvest(self):
        ''' Handle Harvest request

        Fetches a remote resource (or walks a service/WAF endpoint),
        parses it into one or more records, and inserts/updates them in
        the repository.  When re-harvesting a known service, records of
        the previous harvest that no longer exist are deleted.  Returns
        a csw:HarvestResponse element, an ExceptionReport node, or None
        when a responsehandler takes over delivery.
        '''
        service_identifier = None  # identifier of an already-known service
        old_identifier = None      # identifier the new harvest assigned it
        deleted = []               # records pruned after a re-harvest

        # harvesting requires the manager interface to be enabled
        try:
            self.parent._test_manager()
        except Exception as err:
            return self.exceptionreport('NoApplicableCode', 'harvest', str(err))

        if self.parent.requesttype == 'GET':
            # mandatory KVP parameters for GET requests
            if 'resourcetype' not in self.parent.kvp:
                return self.exceptionreport('MissingParameterValue',
                'resourcetype', 'Missing resourcetype parameter')

            if 'source' not in self.parent.kvp:
                return self.exceptionreport('MissingParameterValue',
                'source', 'Missing source parameter')

        # validate resourcetype
        if (self.parent.kvp['resourcetype'] not in
            self.parent.context.model['operations']['Harvest']['parameters']['ResourceType']
            ['values']):
            return self.exceptionreport('InvalidParameterValue',
            'resourcetype', 'Invalid resource type parameter: %s.\
            Allowable resourcetype values: %s' % (self.parent.kvp['resourcetype'],
            ','.join(sorted(self.parent.context.model['operations']['Harvest']['parameters']
            ['ResourceType']['values']))))

        if (self.parent.kvp['resourcetype'].find('opengis.net') == -1 and
            self.parent.kvp['resourcetype'].find('urn:geoss:waf') == -1):
            # fetch content-based resource
            LOGGER.info('Fetching resource %s', self.parent.kvp['source'])
            try:
                content = util.http_request('GET', self.parent.kvp['source'])
            except Exception as err:
                errortext = 'Error fetching resource %s.\nError: %s.' % \
                (self.parent.kvp['source'], str(err))
                LOGGER.exception(errortext)
                return self.exceptionreport('InvalidParameterValue', 'source',
                errortext)
        else:  # it's a service URL
            content = self.parent.kvp['source']
            # query repository to see if service already exists
            LOGGER.info('checking if service exists (%s)', content)
            results = self.parent.repository.query_source(content)

            if len(results) > 0:  # exists, keep identifier for update
                LOGGER.debug('Service already exists, keeping identifier and results')
                service_identifier = getattr(results[0], self.parent.context.md_core_model['mappings']['pycsw:Identifier'])
                service_results = results
                LOGGER.debug('Identifier is %s', service_identifier)
            # return self.exceptionreport('NoApplicableCode', 'source',
            # 'Insert failed: service %s already in repository' % content)

        if hasattr(self.parent.repository, 'local_ingest') and self.parent.repository.local_ingest:
            # repository ingests the source itself (no record parsing here)
            updated = 0
            deleted = []
            try:
                ir = self.parent.repository.insert(self.parent.kvp['resourcetype'], self.parent.kvp['source'])
                inserted = len(ir)
            except Exception as err:
                LOGGER.exception('Harvest (insert) failed')
                return self.exceptionreport('NoApplicableCode',
                'source', 'Harvest (insert) failed: %s.' % str(err))
        else:
            # parse resource into record
            try:
                records_parsed = metadata.parse_record(self.parent.context,
                content, self.parent.repository, self.parent.kvp['resourcetype'],
                pagesize=self.parent.csw_harvest_pagesize)
            except Exception as err:
                LOGGER.exception(err)
                return self.exceptionreport('NoApplicableCode', 'source',
                'Harvest failed: record parsing failed: %s' % str(err))

            inserted = 0
            updated = 0
            ir = []  # insert-result entries ({'identifier': ..., 'title': ...})
            LOGGER.debug('Total Records parsed: %d', len(records_parsed))
            for record in records_parsed:
                # WAF members carry their own source URL; otherwise the
                # harvested endpoint is the source
                if self.parent.kvp['resourcetype'] == 'urn:geoss:waf':
                    src = record.source
                else:
                    src = self.parent.kvp['source']

                setattr(record, self.parent.context.md_core_model['mappings']['pycsw:Source'],
                src)

                setattr(record, self.parent.context.md_core_model['mappings']['pycsw:InsertDate'],
                util.get_today_and_now())

                identifier = getattr(record,
                self.parent.context.md_core_model['mappings']['pycsw:Identifier'])
                source = getattr(record,
                self.parent.context.md_core_model['mappings']['pycsw:Source'])
                insert_date = getattr(record,
                self.parent.context.md_core_model['mappings']['pycsw:InsertDate'])
                title = getattr(record,
                self.parent.context.md_core_model['mappings']['pycsw:Title'])
                record_type = getattr(record, self.parent.context.md_core_model['mappings']['pycsw:Type'])
                record_identifier = getattr(record, self.parent.context.md_core_model['mappings']['pycsw:Identifier'])

                if record_type == 'service' and service_identifier is not None:  # service endpoint
                    # re-harvest of a known service: keep its original identifier
                    LOGGER.info('Replacing service identifier from %s to %s', record_identifier, service_identifier)
                    old_identifier = record_identifier
                    identifier = record_identifier = service_identifier
                if (record_type != 'service' and service_identifier is not None
                    and old_identifier is not None):  # service resource
                    # rewrite child-resource identifiers derived from the
                    # service identifier to match the kept one
                    if record_identifier.find(old_identifier) != -1:
                        new_identifier = record_identifier.replace(old_identifier, service_identifier)
                        LOGGER.info('Replacing service resource identifier from %s to %s', record_identifier, new_identifier)
                        identifier = record_identifier = new_identifier

                ir.append({'identifier': identifier, 'title': title})

                results = []
                if not self.parent.config.has_option('repository', 'source'):
                    # query repository to see if record already exists
                    LOGGER.info('checking if record exists (%s)', identifier)
                    results = self.parent.repository.query_ids(ids=[identifier])
                    if len(results) == 0:  # check for service identifier
                        LOGGER.info('checking if service id exists (%s)', service_identifier)
                        results = self.parent.repository.query_ids(ids=[service_identifier])

                LOGGER.debug(str(results))

                if len(results) == 0:  # new record, it's a new insert
                    inserted += 1
                    try:
                        tmp = self.parent.repository.insert(record, source, insert_date)
                        # some repositories return their own insert results
                        if tmp is not None: ir = tmp
                    except Exception as err:
                        return self.exceptionreport('NoApplicableCode',
                        'source', 'Harvest (insert) failed: %s.' % str(err))
                else:  # existing record, it's an update
                    if source != results[0].source:
                        # same identifier, but different source
                        return self.exceptionreport('NoApplicableCode',
                        'source', 'Insert failed: identifier %s in repository\
                        has source %s.' % (identifier, source))

                    try:
                        self.parent.repository.update(record)
                    except Exception as err:
                        return self.exceptionreport('NoApplicableCode',
                        'source', 'Harvest (update) failed: %s.' % str(err))
                    updated += 1

        if service_identifier is not None:
            # prune records from the previous harvest that no longer exist
            fresh_records = [str(i['identifier']) for i in ir]
            existing_records = [str(i.identifier) for i in service_results]

            deleted = set(existing_records) - set(fresh_records)
            LOGGER.debug('Records to delete: %s', deleted)

            for to_delete in deleted:
                delete_constraint = {
                    'type': 'filter',
                    'values': [to_delete],
                    'where': 'identifier = :pvalue0'
                }
                self.parent.repository.delete(delete_constraint)

        # assemble csw:HarvestResponse wrapping a csw:TransactionResponse
        node = etree.Element(util.nspath_eval('csw:HarvestResponse',
        self.parent.context.namespaces), nsmap=self.parent.context.namespaces)

        node.attrib[util.nspath_eval('xsi:schemaLocation',
        self.parent.context.namespaces)] = \
        '%s %s/csw/3.0/cswHarvest.xsd' % (self.parent.context.namespaces['csw30'],
        self.parent.config.get('server', 'ogc_schemas_base'))

        node2 = etree.SubElement(node,
        util.nspath_eval('csw:TransactionResponse',
        self.parent.context.namespaces), version='2.0.2')

        node2.append(
        self._write_transactionsummary(inserted=len(ir), updated=updated,
        deleted=len(deleted)))

        if inserted > 0:
            # show insert result identifiers
            node2.append(self._write_verboseresponse(ir))

        if 'responsehandler' in self.parent.kvp:  # process the handler
            # asynchronous delivery: note that no value is returned here
            self.parent._process_responsehandler(etree.tostring(node,
            pretty_print=self.parent.pretty_print))
        else:
            return node
def _write_record(self, recobj, queryables):
''' Generate csw30:Record '''
if self.parent.kvp['elementsetname'] == 'brief':
elname = 'BriefRecord'
elif self.parent.kvp['elementsetname'] == 'summary':
elname = 'SummaryRecord'
else:
elname = 'Record'
record = etree.Element(util.nspath_eval('csw30:%s' % elname,
self.parent.context.namespaces), nsmap=self.parent.context.namespaces)
if ('elementname' in self.parent.kvp and
len(self.parent.kvp['elementname']) > 0):
for req_term in ['dc:identifier', 'dc:title']:
if req_term not in self.parent.kvp['elementname']:
value = util.getqattr(recobj, queryables[req_term]['dbcol'])
etree.SubElement(record,
util.nspath_eval(req_term,
self.parent.context.namespaces)).text = value
for elemname in self.parent.kvp['elementname']:
if (elemname.find('BoundingBox') != -1 or
elemname.find('Envelope') != -1):
bboxel = write_boundingbox(util.getqattr(recobj,
self.parent.context.md_core_model['mappings']['pycsw:BoundingBox']),
self.parent.context.namespaces)
if bboxel is not None:
record.append(bboxel)
else:
value = util.getqattr(recobj, queryables[elemname]['dbcol'])
elem = etree.SubElement(record,
util.nspath_eval(elemname,
self.parent.context.namespaces))
if value:
elem.text = value
elif 'elementsetname' in self.parent.kvp:
if (self.parent.kvp['elementsetname'] == 'full' and
util.getqattr(recobj, self.parent.context.md_core_model['mappings']\
['pycsw:Typename']) == 'csw:Record' and
util.getqattr(recobj, self.parent.context.md_core_model['mappings']\
['pycsw:Schema']) == 'http://www.opengis.net/cat/csw/3.0' and
util.getqattr(recobj, self.parent.context.md_core_model['mappings']\
['pycsw:Type']) != 'service'):
# dump record as is and exit
return etree.fromstring(util.getqattr(recobj,
self.parent.context.md_core_model['mappings']['pycsw:XML']), self.parent.context.parser)
etree.SubElement(record,
util.nspath_eval('dc:identifier', self.parent.context.namespaces)).text = \
util.getqattr(recobj,
self.parent.context.md_core_model['mappings']['pycsw:Identifier'])
for i in ['dc:title', 'dc:type']:
val = util.getqattr(recobj, queryables[i]['dbcol'])
if not val:
val = ''
etree.SubElement(record, util.nspath_eval(i,
self.parent.context.namespaces)).text = val
if self.parent.kvp['elementsetname'] in ['summary', 'full']:
# add summary elements
keywords = util.getqattr(recobj, queryables['dc:subject']['dbcol'])
if keywords is not None:
for keyword in keywords.split(','):
etree.SubElement(record,
util.nspath_eval('dc:subject',
self.parent.context.namespaces)).text = keyword
val = util.getqattr(recobj, self.parent.context.md_core_model['mappings']['pycsw:TopicCategory'])
if val:
etree.SubElement(record,
util.nspath_eval('dc:subject',
self.parent.context.namespaces), scheme='http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml#MD_TopicCategoryCode').text = val
val = util.getqattr(recobj, queryables['dc:format']['dbcol'])
if val:
etree.SubElement(record,
util.nspath_eval('dc:format',
self.parent.context.namespaces)).text = val
# links
rlinks = util.getqattr(recobj,
self.parent.context.md_core_model['mappings']['pycsw:Links'])
if rlinks:
LOGGER.info('link type: {}'.format(type(rlinks)))
for link in util.jsonify_links(rlinks):
ref = etree.SubElement(record, util.nspath_eval('dct:references',
self.parent.context.namespaces))
if link['protocol']:
ref.attrib['scheme'] = link['protocol']
ref.text = link['url']
for i in ['dc:relation', 'dct:modified', 'dct:abstract']:
val = util.getqattr(recobj, queryables[i]['dbcol'])
if val is not None:
etree.SubElement(record,
util.nspath_eval(i, self.parent.context.namespaces)).text = val
if self.parent.kvp['elementsetname'] == 'full': # add full elements
for i in ['dc:date', 'dc:creator', \
'dc:publisher', 'dc:contributor', 'dc:source', \
'dc:language', 'dc:rights', 'dct:alternative']:
val = util.getqattr(recobj, queryables[i]['dbcol'])
if val:
etree.SubElement(record,
util.nspath_eval(i, self.parent.context.namespaces)).text = val
val = util.getqattr(recobj, queryables['dct:spatial']['dbcol'])
if val:
etree.SubElement(record,
util.nspath_eval('dct:spatial', self.parent.context.namespaces), scheme='http://www.opengis.net/def/crs').text = val
# always write out ows:BoundingBox
bboxel = write_boundingbox(getattr(recobj,
self.parent.context.md_core_model['mappings']['pycsw:BoundingBox']),
self.parent.context.namespaces)
if bboxel is not None:
record.append(bboxel)
if self.parent.kvp['elementsetname'] != 'brief': # add temporal extent
begin = util.getqattr(record, self.parent.context.md_core_model['mappings']['pycsw:TempExtent_begin'])
end = util.getqattr(record, self.parent.context.md_core_model['mappings']['pycsw:TempExtent_end'])
if begin or end:
tempext = etree.SubElement(record, util.nspath_eval('csw30:TemporalExtent', self.parent.context.namespaces))
if begin:
etree.SubElement(record, util.nspath_eval('csw30:begin', self.parent.context.namespaces)).text = begin
if end:
etree.SubElement(record, util.nspath_eval('csw30:end', self.parent.context.namespaces)).text = end
return record
def _parse_constraint(self, element):
''' Parse csw:Constraint '''
query = {}
tmp = element.find(util.nspath_eval('fes20:Filter', self.parent.context.namespaces))
if tmp is not None:
LOGGER.debug('Filter constraint specified')
try:
query['type'] = 'filter'
query['where'], query['values'] = fes2.parse(tmp,
self.parent.repository.queryables['_all'], self.parent.repository.dbtype,
self.parent.context.namespaces, self.parent.orm, self.parent.language['text'], self.parent.repository.fts)
query['_dict'] = xml2dict(etree.tostring(tmp), self.parent.context.namespaces)
except Exception as err:
return 'Invalid Filter request: %s' % err
tmp = element.find(util.nspath_eval('csw30:CqlText', self.parent.context.namespaces))
if tmp is not None:
LOGGER.debug('CQL specified: %s.', tmp.text)
try:
LOGGER.info('Transforming CQL into OGC Filter')
query['type'] = 'filter'
cql = cql2fes(tmp.text, self.parent.context.namespaces, fes_version='2.0')
query['where'], query['values'] = fes2.parse(cql,
self.parent.repository.queryables['_all'], self.parent.repository.dbtype,
self.parent.context.namespaces, self.parent.orm, self.parent.language['text'], self.parent.repository.fts)
query['_dict'] = xml2dict(etree.tostring(cql), self.parent.context.namespaces)
except Exception as err:
LOGGER.exception('Invalid CQL request: %s', tmp.text)
LOGGER.exception('Error message: %s', err)
return 'Invalid CQL request'
return query
    def parse_postdata(self, postdata):
        ''' Parse POST XML

        Parses (and, where possible, schema-validates) an XML request
        body into a flat ``request`` dict keyed by lowercase parameter
        names.  Returns the dict on success or an error message string
        on failure.
        '''

        request = {}
        try:
            LOGGER.info('Parsing %s.', postdata)
            doc = etree.fromstring(postdata, self.parent.context.parser)
        except Exception as err:
            errortext = \
            'Exception: document not well-formed.\nError: %s.' % str(err)
            LOGGER.exception(errortext)
            return errortext

        # if this is a SOAP request, get to SOAP-ENV:Body/csw:*
        if (doc.tag == util.nspath_eval('soapenv:Envelope',
            self.parent.context.namespaces)):
            LOGGER.debug('SOAP request specified')
            self.parent.soap = True
            doc = doc.find(
            util.nspath_eval('soapenv:Body',
            self.parent.context.namespaces)).xpath('child::*')[0]

        # XSD filename is derived from the request operation name
        xsd_filename = 'csw%s.xsd' % etree.QName(doc).localname
        schema = os.path.join(self.parent.config.get('server', 'home'),
        'core', 'schemas', 'ogc', 'cat', 'csw', '3.0', xsd_filename)

        try:
            # it is virtually impossible to validate a csw:Transaction
            # csw:Insert|csw:Update (with single child) XML document.
            # Only validate non csw:Transaction XML
            if doc.find('.//%s' % util.nspath_eval('csw30:Insert',
                self.parent.context.namespaces)) is None and \
                len(doc.xpath('//csw30:Update/child::*',
                namespaces=self.parent.context.namespaces)) == 0:
                LOGGER.info('Validating %s', postdata)
                schema = etree.XMLSchema(file=schema)
                parser = etree.XMLParser(schema=schema, resolve_entities=False)
                if hasattr(self.parent, 'soap') and self.parent.soap:
                    # validate the body of the SOAP request
                    doc = etree.fromstring(etree.tostring(doc), parser)
                else:  # validate the request normally
                    doc = etree.fromstring(postdata, parser)
                LOGGER.debug('Request is valid XML')
            else:  # parse Transaction without validation
                doc = etree.fromstring(postdata, self.parent.context.parser)
        except Exception as err:
            errortext = \
            'Exception: the document is not valid.\nError: %s' % str(err)
            LOGGER.exception(errortext)
            return errortext

        request['request'] = etree.QName(doc).localname
        LOGGER.debug('Request operation %s specified.', request['request'])

        # common root attributes / elements
        tmp = doc.find('.').attrib.get('service')
        if tmp is not None:
            request['service'] = tmp

        tmp = doc.find('.').attrib.get('version')
        if tmp is not None:
            request['version'] = tmp

        # an ows20:Version element (anywhere) overrides the attribute
        tmp = doc.find('.//%s' % util.nspath_eval('ows20:Version',
        self.parent.context.namespaces))
        if tmp is not None:
            request['version'] = tmp.text

        tmp = doc.find('.').attrib.get('updateSequence')
        if tmp is not None:
            request['updatesequence'] = tmp

        # GetCapabilities
        if request['request'] == 'GetCapabilities':
            tmp = doc.find(util.nspath_eval('ows20:Sections',
            self.parent.context.namespaces))
            if tmp is not None:
                request['sections'] = ','.join([section.text for section in \
                doc.findall(util.nspath_eval('ows20:Sections/ows20:Section',
                self.parent.context.namespaces))])

            tmp = doc.find(util.nspath_eval('ows20:AcceptFormats',
            self.parent.context.namespaces))
            if tmp is not None:
                request['acceptformats'] = ','.join([aformat.text for aformat in \
                doc.findall(util.nspath_eval('ows20:AcceptFormats/ows20:OutputFormat',
                self.parent.context.namespaces))])

            tmp = doc.find(util.nspath_eval('ows20:AcceptVersions',
            self.parent.context.namespaces))
            if tmp is not None:
                request['acceptversions'] = ','.join([version.text for version in \
                doc.findall(util.nspath_eval('ows20:AcceptVersions/ows20:Version',
                self.parent.context.namespaces))])

        # GetDomain
        if request['request'] == 'GetDomain':
            tmp = doc.find(util.nspath_eval('csw30:ParameterName',
            self.parent.context.namespaces))
            if tmp is not None:
                request['parametername'] = tmp.text

            tmp = doc.find(util.nspath_eval('csw30:ValueReference',
            self.parent.context.namespaces))
            if tmp is not None:
                request['valuereference'] = tmp.text

        # GetRecords
        if request['request'] == 'GetRecords':
            tmp = doc.find('.').attrib.get('outputSchema')
            request['outputschema'] = tmp if tmp is not None \
            else self.parent.context.namespaces['csw30']

            tmp = doc.find('.').attrib.get('outputFormat')
            request['outputformat'] = tmp if tmp is not None \
            else 'application/xml'

            tmp = doc.find('.').attrib.get('startPosition')
            request['startposition'] = tmp if tmp is not None else 1

            tmp = doc.find('.').attrib.get('requestId')
            request['requestid'] = tmp if tmp is not None else None

            tmp = doc.find('.').attrib.get('maxRecords')
            if tmp is not None:
                request['maxrecords'] = tmp

            tmp = doc.find(util.nspath_eval('csw30:DistributedSearch',
            self.parent.context.namespaces))
            if tmp is not None:
                request['distributedsearch'] = True
                hopcount = tmp.attrib.get('hopCount')
                # hopCount is decremented before being passed downstream
                request['hopcount'] = int(hopcount)-1 if hopcount is not None \
                else 1
            else:
                request['distributedsearch'] = False

            tmp = doc.find(util.nspath_eval('csw30:ResponseHandler',
            self.parent.context.namespaces))
            if tmp is not None:
                request['responsehandler'] = tmp.text

            tmp = doc.find(util.nspath_eval('csw30:Query/csw30:ElementSetName',
            self.parent.context.namespaces))
            request['elementsetname'] = tmp.text if tmp is not None else None

            tmp = doc.find(util.nspath_eval(
            'csw30:Query', self.parent.context.namespaces)).attrib.get('typeNames')
            # NOTE(review): the default is the *string* 'csw:Record' while
            # an explicit value yields a list from split() -- confirm
            # downstream consumers accept both shapes
            request['typenames'] = tmp.split() if tmp is not None \
            else 'csw:Record'

            request['elementname'] = [elname.text for elname in \
            doc.findall(util.nspath_eval('csw30:Query/csw30:ElementName',
            self.parent.context.namespaces))]

            request['constraint'] = {}
            tmp = doc.find(util.nspath_eval('csw30:Query/csw30:Constraint',
            self.parent.context.namespaces))
            if tmp is not None:
                request['constraint'] = self._parse_constraint(tmp)
                if isinstance(request['constraint'], str):  # parse error
                    return 'Invalid Constraint: %s' % request['constraint']
            else:
                LOGGER.debug('No csw30:Constraint (fes20:Filter or csw30:CqlText) \
                specified')

            tmp = doc.find(util.nspath_eval('csw30:Query/fes20:SortBy',
            self.parent.context.namespaces))
            if tmp is not None:
                LOGGER.debug('Sorted query specified')
                request['sortby'] = {}

                try:
                    elname = tmp.find(util.nspath_eval(
                    'fes20:SortProperty/fes20:ValueReference',
                    self.parent.context.namespaces)).text

                    request['sortby']['propertyname'] = \
                    self.parent.repository.queryables['_all'][elname]['dbcol']

                    if (elname.find('BoundingBox') != -1 or
                        elname.find('Envelope') != -1):
                        # it's a spatial sort
                        request['sortby']['spatial'] = True
                except Exception as err:
                    errortext = \
                    'Invalid fes20:SortProperty/fes20:ValueReference: %s' % str(err)
                    LOGGER.exception(errortext)
                    return errortext

                tmp2 = tmp.find(util.nspath_eval(
                'fes20:SortProperty/fes20:SortOrder', self.parent.context.namespaces))
                request['sortby']['order'] = tmp2.text if tmp2 is not None \
                else 'ASC'
            else:
                request['sortby'] = None

        # GetRecordById
        if request['request'] == 'GetRecordById':
            request['id'] = None

            tmp = doc.find(util.nspath_eval('csw30:Id', self.parent.context.namespaces))
            if tmp is not None:
                request['id'] = tmp.text

            tmp = doc.find(util.nspath_eval('csw30:ElementSetName',
            self.parent.context.namespaces))
            request['elementsetname'] = tmp.text if tmp is not None \
            else 'summary'

            tmp = doc.find('.').attrib.get('outputSchema')
            request['outputschema'] = tmp if tmp is not None \
            else self.parent.context.namespaces['csw30']

            tmp = doc.find('.').attrib.get('outputFormat')
            if tmp is not None:
                request['outputformat'] = tmp

        # Transaction
        if request['request'] == 'Transaction':
            request['verboseresponse'] = True
            tmp = doc.find('.').attrib.get('verboseResponse')
            if tmp is not None:
                if tmp in ['false', '0']:
                    request['verboseresponse'] = False

            tmp = doc.find('.').attrib.get('requestId')
            request['requestid'] = tmp if tmp is not None else None

            request['transactions'] = []

            # inserts: each child of csw30:Insert is a record payload
            for ttype in \
                doc.xpath('//csw30:Insert', namespaces=self.parent.context.namespaces):
                tname = ttype.attrib.get('typeName')
                for mdrec in ttype.xpath('child::*'):
                    xml = mdrec
                    request['transactions'].append(
                    {'type': 'insert', 'typename': tname, 'xml': xml})

            for ttype in \
                doc.xpath('//csw30:Update', namespaces=self.parent.context.namespaces):
                child = ttype.xpath('child::*')
                update = {'type': 'update'}
                if len(child) == 1:  # it's a wholesale update
                    update['xml'] = child[0]
                else:  # it's a RecordProperty with Constraint Update
                    update['recordproperty'] = []
                    # NOTE(review): 'csw:RecordProperty' uses the csw (2.0.2)
                    # prefix while surrounding lookups use csw30 -- confirm
                    # this matches the namespace map, else these never match
                    for recprop in ttype.findall(
                        util.nspath_eval('csw:RecordProperty',
                        self.parent.context.namespaces)):
                        rpname = recprop.find(util.nspath_eval('csw30:Name',
                        self.parent.context.namespaces)).text
                        rpvalue = recprop.find(
                        util.nspath_eval('csw30:Value',
                        self.parent.context.namespaces)).text
                        update['recordproperty'].append(
                        {'name': rpname, 'value': rpvalue})

                    update['constraint'] = self._parse_constraint(
                    ttype.find(util.nspath_eval('csw30:Constraint',
                    self.parent.context.namespaces)))

                request['transactions'].append(update)

            for ttype in \
                doc.xpath('//csw30:Delete', namespaces=self.parent.context.namespaces):
                tname = ttype.attrib.get('typeName')
                constraint = self._parse_constraint(
                ttype.find(util.nspath_eval('csw30:Constraint',
                self.parent.context.namespaces)))

                if isinstance(constraint, str):  # parse error
                    return 'Invalid Constraint: %s' % constraint

                request['transactions'].append(
                {'type': 'delete', 'typename': tname, 'constraint': constraint})

        # Harvest
        if request['request'] == 'Harvest':
            # csw30:Source and csw30:ResourceType are mandatory
            request['source'] = doc.find(util.nspath_eval('csw30:Source',
            self.parent.context.namespaces)).text

            request['resourcetype'] = \
            doc.find(util.nspath_eval('csw30:ResourceType',
            self.parent.context.namespaces)).text

            tmp = doc.find(util.nspath_eval('csw30:ResourceFormat',
            self.parent.context.namespaces))
            if tmp is not None:
                request['resourceformat'] = tmp.text
            else:
                request['resourceformat'] = 'application/xml'

            tmp = doc.find(util.nspath_eval('csw30:HarvestInterval',
            self.parent.context.namespaces))
            if tmp is not None:
                request['harvestinterval'] = tmp.text

            tmp = doc.find(util.nspath_eval('csw30:ResponseHandler',
            self.parent.context.namespaces))
            if tmp is not None:
                request['responsehandler'] = tmp.text

        return request
def _write_transactionsummary(self, inserted=0, updated=0, deleted=0):
''' Write csw:TransactionSummary construct '''
node = etree.Element(util.nspath_eval('csw30:TransactionSummary',
self.parent.context.namespaces))
if 'requestid' in self.parent.kvp and self.parent.kvp['requestid'] is not None:
node.attrib['requestId'] = self.parent.kvp['requestid']
etree.SubElement(node, util.nspath_eval('csw30:totalInserted',
self.parent.context.namespaces)).text = str(inserted)
etree.SubElement(node, util.nspath_eval('csw30:totalUpdated',
self.parent.context.namespaces)).text = str(updated)
etree.SubElement(node, util.nspath_eval('csw30:totalDeleted',
self.parent.context.namespaces)).text = str(deleted)
return node
def _write_acknowledgement(self, root=True):
''' Generate csw:Acknowledgement '''
node = etree.Element(util.nspath_eval('csw30:Acknowledgement',
self.parent.context.namespaces),
nsmap = self.parent.context.namespaces, timeStamp=util.get_today_and_now())
if root:
node.attrib[util.nspath_eval('xsi:schemaLocation',
self.parent.context.namespaces)] = \
'%s %s/cat/csw/3.0/cswAll.xsd' % (self.parent.context.namespaces['csw30'], \
self.parent.config.get('server', 'ogc_schemas_base'))
node1 = etree.SubElement(node, util.nspath_eval('csw30:EchoedRequest',
self.parent.context.namespaces))
if self.parent.requesttype == 'POST':
node1.append(etree.fromstring(self.parent.request, self.parent.context.parser))
else: # GET
node2 = etree.SubElement(node1, util.nspath_eval('ows:Get',
self.parent.context.namespaces))
node2.text = self.parent.request
if self.parent.asynchronous:
etree.SubElement(node, util.nspath_eval('csw30:RequestId',
self.parent.context.namespaces)).text = self.parent.kvp['requestid']
return node
def _write_verboseresponse(self, insertresults):
''' show insert result identifiers '''
insertresult = etree.Element(util.nspath_eval('csw30:InsertResult',
self.parent.context.namespaces))
for ir in insertresults:
briefrec = etree.SubElement(insertresult,
util.nspath_eval('csw30:BriefRecord',
self.parent.context.namespaces))
etree.SubElement(briefrec,
util.nspath_eval('dc:identifier',
self.parent.context.namespaces)).text = ir['identifier']
etree.SubElement(briefrec,
util.nspath_eval('dc:title',
self.parent.context.namespaces)).text = ir['title']
return insertresult
def _write_allowed_values(self, values):
''' design pattern to write ows20:AllowedValues '''
allowed_values = etree.Element(util.nspath_eval('ows20:AllowedValues',
self.parent.context.namespaces))
for value in sorted(values):
etree.SubElement(allowed_values,
util.nspath_eval('ows20:Value',
self.parent.context.namespaces)).text = value
return allowed_values
def exceptionreport(self, code, locator, text):
''' Generate ExceptionReport '''
self.parent.exception = True
self.parent.status = code
try:
language = self.parent.config.get('server', 'language')
ogc_schemas_base = self.parent.config.get('server', 'ogc_schemas_base')
except:
language = 'en-US'
ogc_schemas_base = self.parent.context.ogc_schemas_base
node = etree.Element(util.nspath_eval('ows20:ExceptionReport',
self.parent.context.namespaces), nsmap=self.parent.context.namespaces,
version='3.0.0')
node.attrib['{http://www.w3.org/XML/1998/namespace}lang'] = language
node.attrib[util.nspath_eval('xsi:schemaLocation',
self.parent.context.namespaces)] = \
'%s %s/ows/2.0/owsExceptionReport.xsd' % \
(self.parent.context.namespaces['ows20'], ogc_schemas_base)
exception = etree.SubElement(node, util.nspath_eval('ows20:Exception',
self.parent.context.namespaces),
exceptionCode=code, locator=locator)
exception_text = etree.SubElement(exception,
util.nspath_eval('ows20:ExceptionText',
self.parent.context.namespaces))
try:
exception_text.text = text
except ValueError as err:
exception_text.text = repr(text)
return node
def resolve_nsmap(self, list_):
'''' Resolve typename bindings based on default and KVP namespaces '''
nsmap = {}
tns = []
LOGGER.debug('Namespace list pairs: %s', list_)
# bind KVP namespaces into typenames
for ns in self.parent.kvp['namespace'].split(','):
nspair = ns.split('(')[1].split(')')[0].split('=')
if len(nspair) == 1: # default namespace
nsmap['csw'] = nspair[1]
else:
nsmap[nspair[0]] = nspair[1]
LOGGER.debug('Namespace pairs: %s', nsmap)
for tn in list_:
LOGGER.debug(tn)
if tn.find(':') != -1: # resolve prefix
prefix = tn.split(':')[0]
if prefix in nsmap.keys(): # get uri
uri = nsmap[prefix]
newprefix = next(k for k, v in self.parent.context.namespaces.items() if v == uri)
LOGGER.debug(uri)
LOGGER.debug(prefix)
LOGGER.debug(newprefix)
#if prefix == 'csw30': newprefix = 'csw'
newvalue = tn.replace(prefix, newprefix).replace('csw30', 'csw')
else:
newvalue = tn
else: # default namespace
newvalue = tn
tns.append(newvalue)
LOGGER.debug(tns)
return tns
def write_boundingbox(bbox, nsmap):
    """Generate an ows20:BoundingBox element from a WKT envelope.

    :param bbox: WKT envelope string (or None)
    :param nsmap: namespace prefix/URI map for nspath_eval
    :returns: lxml ``Element``, or None when bbox is absent/unparseable
    """
    if bbox is None:
        return None
    try:
        bbox2 = util.wkt2geom(bbox)
    except Exception:  # was a bare except; unparseable WKT yields no bbox
        return None
    if len(bbox2) != 4:
        return None

    boundingbox = etree.Element(
        util.nspath_eval('ows20:BoundingBox', nsmap),
        crs='http://www.opengis.net/def/crs/EPSG/0/4326',
        dimensions='2')

    # OWS BoundingBox uses lat/lon (y x) axis order for EPSG:4326
    etree.SubElement(boundingbox, util.nspath_eval('ows20:LowerCorner',
                     nsmap)).text = '%s %s' % (bbox2[1], bbox2[0])
    etree.SubElement(boundingbox, util.nspath_eval('ows20:UpperCorner',
                     nsmap)).text = '%s %s' % (bbox2[3], bbox2[2])
    return boundingbox
    # NOTE(review): this block appears to be dead/orphaned code —
    # `nextrecord` and `matched` are not defined in the visible scope, and
    # the identical logic exists in get_resultset_status() below.
    # Confirm it is unreachable and remove it.
    if nextrecord == 0:
        searchresult_status = 'complete'
    elif nextrecord > 0:
        searchresult_status = 'subset'
    elif matched == 0:
        searchresult_status = 'none'
def get_resultset_status(matched, nextrecord):
    """Assess the completeness status of a CSW result set.

    :param matched: total number of records matched by the query
    :param nextrecord: position of the next record (0 means exhausted)
    :returns: one of 'complete', 'none' or 'subset'
    """
    if nextrecord == 0:
        return 'complete'
    if matched == 0:
        return 'none'
    return 'subset'
def get_elapsed_time(begin, end):
    """Return the elapsed time between two timestamps in whole milliseconds."""
    elapsed_seconds = end - begin
    return int(elapsed_seconds * 1000)
| mit | 66d5d102432a8e2addaf4e3e725f158d | 47.046217 | 223 | 0.559177 | 4.45332 | false | false | false | false |
python-escpos/python-escpos | src/escpos/capabilities.py | 2 | 4381 | import re
from os import environ, path
import pkg_resources
import pickle
import logging
import time
import six
import yaml
from tempfile import gettempdir
import platform
from typing import Any, Dict
# NOTE(review): logging.basicConfig() at import time configures the root
# logger as a side effect — libraries normally should not do this.
logging.basicConfig()
logger = logging.getLogger(__name__)

# Pickled capability cache, keyed by interpreter version, stored in a
# (possibly shared) temp directory unless overridden via the environment.
pickle_dir = environ.get("ESCPOS_CAPABILITIES_PICKLE_DIR", gettempdir())
pickle_path = path.join(
    pickle_dir, "{v}.capabilities.pickle".format(v=platform.python_version())
)
# get a temporary file from pkg_resources if no file is specified in env
capabilities_path = environ.get(
    "ESCPOS_CAPABILITIES_FILE",
    pkg_resources.resource_filename(__name__, "capabilities.json"),
)

# Load external printer database. The pickle is only reused when it is at
# least as new as the capabilities file (mtime comparison below).
t0 = time.time()
logger.debug("Using capabilities from file: %s", capabilities_path)
if path.exists(pickle_path):
    if path.getmtime(capabilities_path) > path.getmtime(pickle_path):
        logger.debug("Found a more recent capabilities file")
        full_load = True
    else:
        full_load = False
        logger.debug("Loading capabilities from pickle in %s", pickle_path)
        # NOTE(review): unpickling from a world-writable temp dir trusts
        # whatever is on disk; confirm this threat model is acceptable.
        with open(pickle_path, "rb") as cf:
            CAPABILITIES = pickle.load(cf)
else:
    logger.debug("Capabilities pickle file not found: %s", pickle_path)
    full_load = True

if full_load:
    logger.debug("Loading and pickling capabilities")
    with open(capabilities_path) as cp, open(pickle_path, "wb") as pp:
        CAPABILITIES = yaml.safe_load(cp)
        # protocol=2 keeps the cache readable by Python 2 interpreters
        pickle.dump(CAPABILITIES, pp, protocol=2)

logger.debug("Finished loading capabilities took %.2fs", time.time() - t0)

# Map of profile name -> raw capability entry, consumed by get_profile_class()
PROFILES: Dict[str, Any] = CAPABILITIES["profiles"]
class NotSupported(Exception):
    """Signals that the active printer profile lacks a requested feature."""
# Feature key for the optional "Barcode B" command set in a profile's features
BARCODE_B = "barcodeB"
class BaseProfile(object):
    """This represents a printer profile.

    A printer profile knows about the number of columns, supported
    features, colors and more.
    """

    # Filled in by the dynamically generated subclasses (see
    # get_profile_class()): the raw entry from the capabilities database.
    profile_data: Dict[str, Any] = {}

    def __getattr__(self, name):
        # Unknown attributes are looked up in the profile data.
        # NOTE(review): this raises KeyError (not AttributeError) for
        # missing names, which breaks hasattr()/getattr(default) — confirm
        # whether callers rely on that before changing it.
        return self.profile_data[name]

    def get_font(self, font) -> int:
        """Return the escpos index for `font`. Makes sure that
        the requested `font` is valid.
        """
        # Accept the symbolic names "a"/"b" as aliases for indices 0/1.
        font = {"a": 0, "b": 1}.get(font, font)
        if not six.text_type(font) in self.fonts:
            raise NotSupported(
                '"{}" is not a valid font in the current profile'.format(font)
            )
        return font

    def get_columns(self, font):
        """Return the number of columns for the given font."""
        font = self.get_font(font)
        return self.fonts[six.text_type(font)]["columns"]

    def supports(self, feature):
        """Return true/false for the given feature."""
        # Returns the stored value itself, which is None (not False) for
        # unknown features.
        return self.features.get(feature)

    def get_code_pages(self):
        """Return the support code pages as a ``{name: index}`` dict."""
        return {v: k for k, v in self.codePages.items()}
def get_profile(name: str = None, **kwargs):
    """Return a profile instance for *name* (the default profile when omitted).

    A ``Profile`` instance passed as *name* is returned unchanged; extra
    keyword arguments are forwarded to the profile class constructor.
    """
    if isinstance(name, Profile):
        return name
    profile_name = name or "default"
    profile_class = get_profile_class(profile_name)
    return profile_class(**kwargs)
# Cache of dynamically generated profile classes, keyed by profile name
CLASS_CACHE = {}
def get_profile_class(name: str):
    """Return (and cache) a dynamically generated class for profile *name*.

    The class derives from BaseProfile and carries the raw capabilities
    entry in its ``profile_data`` attribute.
    """
    if name in CLASS_CACHE:
        return CLASS_CACHE[name]
    data = PROFILES[name]
    base_name = clean(name)
    klass = type(
        "{}{}Profile".format(base_name[0].upper(), base_name[1:]),
        (BaseProfile,),
        {"profile_data": data},
    )
    CLASS_CACHE[name] = klass
    return klass
def clean(s):
    """Reduce *s* to a valid Python identifier fragment.

    Strips every character that is not alphanumeric or underscore, then
    drops leading characters until a letter or underscore is found.
    """
    valid_chars = re.sub(r"[^0-9a-zA-Z_]", "", s)
    identifier = re.sub(r"^[^a-zA-Z_]+", "", valid_chars)
    return str(identifier)
class Profile(get_profile_class("default")):
    """
    For users, who want to provide their profile
    """

    def __init__(self, columns=None, features=None):
        # *columns* (when given) overrides the font-dependent column count;
        # *features* replaces the inherited feature map.
        super(Profile, self).__init__()
        self.columns = columns
        self.features = features or {}

    def get_columns(self, font):
        # An explicit column override wins over the profile's font table.
        if self.columns is not None:
            return self.columns

        return super(Profile, self).get_columns(font)
| mit | e0a1a615168f5aaccf3bb93d2445bc75 | 27.083333 | 84 | 0.643689 | 3.75729 | false | false | false | false |
dmort27/epitran | epitran/bin/testvectorgen.py | 1 | 1318 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import codecs
import epitran.vector
def main(code, space, infile):
    """Print IPA segment/vector details for each word in a TSV file.

    :param code: Epitran script code (e.g. 'uig-Arab')
    :param space: name of the IPA space to load
    :param infile: path to a UTF-8 TSV file whose first column is the word
    """
    vec = epitran.vector.VectorsWithIPASpace(code, space)
    with codecs.open(infile, 'r', 'utf-8') as f:
        for line in f:
            fields = line.split('\t')
            # only lines with at least two tab-separated fields are processed
            if len(fields) > 1:
                word = fields[0]
                # NOTE(review): .encode('utf-8') makes print() emit a bytes
                # repr under Python 3; this script appears to target Python 2
                # (see the __future__ print_function import) — confirm.
                print(u"WORD: {}".format(word).encode('utf-8'))
                segs = vec.word_to_segs(word)
                for record in segs:
                    cat, case, orth, phon, id_, vector = record
                    print(u"Category: {}".format(cat).encode('utf-8'))
                    print(u"Case: {}".format(case).encode('utf-8'))
                    print(u"Orthographic: {}".format(orth).encode('utf-8'))
                    print(u"Phonetic: {}".format(phon).encode('utf-8'))
                    print(u"Vector: {}".format(vector).encode('utf-8'))
if __name__ == '__main__':
    # CLI entry point: all three arguments are mandatory.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--code', required=True, help='Script code.')
    parser.add_argument('-s', '--space', required=True, help='Space.')
    parser.add_argument('-i', '--infile', required=True, help='Input file.')
    args = parser.parse_args()
    main(args.code, args.space, args.infile)
| mit | f8e306f9be7a0b28080d930e07df22b0 | 36.657143 | 76 | 0.550076 | 3.610959 | false | false | false | false |
dmort27/epitran | setup.py | 1 | 1958 | from io import open
from setuptools import setup
# Package metadata for epitran; data files (CSV maps, rules, spaces) are
# shipped via package_data so transliteration works from an installed wheel.
setup(name='epitran',
      version='1.23',
      description='Tools for transcribing languages into IPA.',
      long_description=open('README.md', encoding='utf-8').read(),
      long_description_content_type='text/markdown',
      url='http://github.com/dmort27/epitran',
      # BUG FIX: tarball URL pointed at 1.22 while version is 1.23 —
      # keep download_url in sync with `version` above.
      download_url='https://github.com/dmort27/epitran/archive/1.23.tar.gz',
      author='David R. Mortensen',
      author_email='dmortens@cs.cmu.edu',
      license='MIT',
      install_requires=['setuptools',
                        'regex',
                        'panphon>=0.20',
                        'marisa-trie',
                        'requests'],
      extras_require={':python_version<"3.0"': ['subprocess32']},
      scripts=['epitran/bin/epitranscribe.py',
               'epitran/bin/uigtransliterate.py',
               'epitran/bin/detectcaps.py',
               'epitran/bin/connl2ipaspace.py',
               'epitran/bin/connl2engipaspace.py',
               'epitran/bin/migraterules.py',
               'epitran/bin/decompose.py',
               'epitran/bin/testvectorgen.py',
               'epitran/bin/transltf.py'],
      packages=['epitran'],
      package_dir={'epitran': 'epitran'},
      package_data={'epitran': ['data/*.csv', 'data/*.txt',
                                'data/map/*.csv', 'data/*.json',
                                'data/pre/*.txt', 'data/post/*.txt',
                                'data/space/*.csv', 'data/strip/*.csv',
                                'data/reromanize/*.csv',
                                'data/rules/*.txt',
                                'data/bib/*.bib']},
      zip_safe=True,
      classifiers=['Operating System :: OS Independent',
                   'Programming Language :: Python :: 3',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: Text Processing :: Linguistic']
      )
| mit | fa7d5f29f50a2a4aec6eefe452a973dc | 44.534884 | 82 | 0.503575 | 3.931727 | false | false | false | false |
crdoconnor/strictyaml | strictyaml/ruamel/emitter.py | 1 | 65161 | # coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
import sys
from strictyaml.ruamel.error import YAMLError, YAMLStreamError
from strictyaml.ruamel.events import * # NOQA
# fmt: off
from strictyaml.ruamel.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, \
check_anchorname_char
# fmt: on
if False: # MYPY
from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA
from strictyaml.ruamel.compat import StreamType # NOQA
__all__ = ["Emitter", "EmitterError"]
class EmitterError(YAMLError):
    """Raised when the emitter receives events in an unexpected order."""

    pass
class ScalarAnalysis(object):
    """Bundle of facts the emitter derives from a scalar value.

    The boolean flags record which output styles (flow/block plain,
    single/double quoted, block) may legally represent the analyzed text.
    """

    def __init__(
        self,
        scalar,
        empty,
        multiline,
        allow_flow_plain,
        allow_block_plain,
        allow_single_quoted,
        allow_double_quoted,
        allow_block,
    ):
        # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
        self.scalar = scalar  # the analyzed text itself
        self.empty = empty  # True for the empty string
        self.multiline = multiline  # True when the text spans line breaks
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block
class Indents(object):
    """Stack of (indent, is_sequence) pairs, replacing the plain list of
    None/int that PyYAML uses."""

    def __init__(self):
        # type: () -> None
        self.values = []  # type: List[Tuple[int, bool]]

    def append(self, val, seq):
        # type: (Any, Any) -> None
        """Push an indent level; *seq* marks whether it belongs to a sequence."""
        self.values.append((val, seq))

    def pop(self):
        # type: () -> Any
        """Pop the top entry and return only its indent value."""
        indent, _seq = self.values.pop()
        return indent

    def last_seq(self):
        # type: () -> bool
        # return the seq(uence) value for the element added before the last
        # one in increase_indent()
        if len(self.values) < 2:
            return False
        return self.values[-2][1]

    def seq_flow_align(self, seq_indent, column):
        # type: (int, int) -> int
        """Extra spaces needed to align a flow collection after a block
        sequence dash; 0 when not directly inside a block sequence."""
        if len(self.values) < 2 or not self.values[-1][1]:
            return 0
        base = self.values[-1][0]
        if base is None:
            base = 0
        # -1 accounts for the dash itself
        return base + seq_indent - column - 1

    def __len__(self):
        # type: () -> int
        return len(self.values)
class Emitter(object):
# fmt: off
DEFAULT_TAG_PREFIXES = {
u'!': u'!',
u'tag:yaml.org,2002:': u'!!',
}
# fmt: on
MAX_SIMPLE_KEY_LENGTH = 128
    def __init__(
        self,
        stream,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
        brace_single_entry_mapping_in_flow_sequence=None,
        dumper=None,
    ):
        # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None  # NOQA
        """Initialize emitter state: output stream, the event-driven state
        machine, indentation policy and formatting preferences."""
        self.dumper = dumper
        if self.dumper is not None and getattr(self.dumper, "_emitter", None) is None:
            self.dumper._emitter = self
        self.stream = stream

        # Encoding can be overridden by STREAM-START.
        self.encoding = None  # type: Optional[Text]
        self.allow_space_break = None

        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = []  # type: List[Any]
        self.state = self.expect_stream_start  # type: Any

        # Current event and the event queue.
        self.events = []  # type: List[Any]
        self.event = None  # type: Any

        # The current indentation level and the stack of previous indents.
        self.indents = Indents()
        self.indent = None  # type: Optional[int]

        # flow_context is an expanding/shrinking list consisting of '{' and '['
        # for each unclosed flow context. If empty list that means block context
        self.flow_context = []  # type: List[Text]

        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False

        # Characteristics of the last emitted character:
        #  - current position.
        #  - is it a whitespace?
        #  - is it an indention character
        #    (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        self.compact_seq_seq = True  # dash after dash
        self.compact_seq_map = True  # key after dash
        # self.compact_ms = False   # dash after key, only when explicit key with ?
        self.no_newline = None  # type: Optional[bool]  # set if directly after `- `

        # Whether the document requires an explicit document end indicator
        self.open_ended = False

        # colon handling
        self.colon = u":"
        self.prefixed_colon = (
            self.colon if prefix_colon is None else prefix_colon + self.colon
        )
        # single entry mappings in flow sequence
        self.brace_single_entry_mapping_in_flow_sequence = (
            brace_single_entry_mapping_in_flow_sequence  # NOQA
        )

        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
        self.unicode_supplementary = sys.maxunicode > 0xFFFF
        self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
        self.top_level_colon_align = top_level_colon_align
        self.best_sequence_indent = 2
        self.requested_indent = indent  # specific for literal zero indent
        # only indents in the open interval (1, 10) are honoured
        if indent and 1 < indent < 10:
            self.best_sequence_indent = indent
        self.best_map_indent = self.best_sequence_indent
        # if self.best_sequence_indent < self.sequence_dash_offset + 1:
        #     self.best_sequence_indent = self.sequence_dash_offset + 1
        self.best_width = 80
        if width and width > self.best_sequence_indent * 2:
            self.best_width = width
        self.best_line_break = u"\n"  # type: Any
        if line_break in [u"\r", u"\n", u"\r\n"]:
            self.best_line_break = line_break

        # Tag prefixes.
        self.tag_prefixes = None  # type: Any

        # Prepared anchor and tag.
        self.prepared_anchor = None  # type: Any
        self.prepared_tag = None  # type: Any

        # Scalar analysis and style.
        self.analysis = None  # type: Any
        self.style = None  # type: Any

        self.scalar_after_indicator = True  # write a scalar on the same line as `---`

        self.alt_null = "null"
    @property
    def stream(self):
        # type: () -> Any
        """The output stream; raises YAMLStreamError if it was never set."""
        try:
            return self._stream
        except AttributeError:
            raise YAMLStreamError("output stream needs to specified")
    @stream.setter
    def stream(self, val):
        # type: (Any) -> None
        # None is silently ignored so the attribute can be assigned lazily;
        # anything else must expose a write() method.
        if val is None:
            return
        if not hasattr(val, "write"):
            raise YAMLStreamError("stream argument needs to have a write() method")
        self._stream = val
    @property
    def serializer(self):
        # type: () -> Any
        """The serializer attached to the dumper; the emitter itself serves
        as fallback for cyaml."""
        try:
            if hasattr(self.dumper, "typ"):
                return self.dumper.serializer
            return self.dumper._serializer
        except AttributeError:
            return self  # cyaml
    @property
    def flow_level(self):
        # type: () -> int
        """Current flow-collection nesting depth (0 means block context)."""
        return len(self.flow_context)
    def dispose(self):
        # type: () -> None
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None
    def emit(self, event):
        # type: (Any) -> None
        """Queue *event* and run the state machine for every queued event
        that no longer needs lookahead."""
        if dbg(DBG_EVENT):
            nprint(event)
        self.events.append(event)
        while not self.need_more_events():
            self.event = self.events.pop(0)
            self.state()
            self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
# type: () -> bool
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
# type: (int) -> bool
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return len(self.events) < count + 1
    def increase_indent(self, flow=False, sequence=None, indentless=False):
        # type: (bool, Optional[bool], bool) -> None
        """Push the current indent and compute the next level; *indentless*
        keeps the indent unchanged (block sequences nested in mappings)."""
        self.indents.append(self.indent, sequence)
        if self.indent is None:  # top level
            if flow:
                # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
                #              self.best_map_indent
                # self.indent = self.best_sequence_indent
                self.indent = self.requested_indent
            else:
                self.indent = 0
        elif not indentless:
            self.indent += (
                self.best_sequence_indent
                if self.indents.last_seq()
                else self.best_map_indent
            )
            # if self.indents.last_seq():
            #     if self.indent == 0: # top level block sequence
            #         self.indent = self.best_sequence_indent - self.sequence_dash_offset
            #     else:
            #         self.indent += self.best_sequence_indent
            # else:
            #     self.indent += self.best_map_indent
# States.
# Stream handlers.
    def expect_stream_start(self):
        # type: () -> None
        """Initial state: consume StreamStartEvent and record its encoding
        unless the stream already declares one."""
        if isinstance(self.event, StreamStartEvent):
            if PY2:
                if self.event.encoding and not getattr(self.stream, "encoding", None):
                    self.encoding = self.event.encoding
            else:
                if self.event.encoding and not hasattr(self.stream, "encoding"):
                    self.encoding = self.event.encoding
            self.write_stream_start()
            self.state = self.expect_first_document_start
        else:
            raise EmitterError("expected StreamStartEvent, but got %s" % (self.event,))
    def expect_nothing(self):
        # type: () -> None
        """Terminal state after STREAM-END: any further event is an error."""
        raise EmitterError("expected nothing, but got %s" % (self.event,))
# Document handlers.
    def expect_first_document_start(self):
        # type: () -> Any
        """The first document may leave the '---' marker implicit."""
        return self.expect_document_start(first=True)
    def expect_document_start(self, first=False):
        # type: (bool) -> None
        """Emit %YAML/%TAG directives and the '---' marker when required,
        or close the stream on StreamEndEvent."""
        if isinstance(self.event, DocumentStartEvent):
            if (self.event.version or self.event.tags) and self.open_ended:
                # directives after an open-ended document need '...' first
                self.write_indicator(u"...", True)
                self.write_indent()
            if self.event.version:
                version_text = self.prepare_version(self.event.version)
                self.write_version_directive(version_text)
            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
            if self.event.tags:
                handles = sorted(self.event.tags.keys())
                for handle in handles:
                    prefix = self.event.tags[handle]
                    self.tag_prefixes[prefix] = handle
                    handle_text = self.prepare_tag_handle(handle)
                    prefix_text = self.prepare_tag_prefix(prefix)
                    self.write_tag_directive(handle_text, prefix_text)
            # '---' may be dropped only for a plain first document without
            # directives that is not empty
            implicit = (
                first
                and not self.event.explicit
                and not self.canonical
                and not self.event.version
                and not self.event.tags
                and not self.check_empty_document()
            )
            if not implicit:
                self.write_indent()
                self.write_indicator(u"---", True)
                if self.canonical:
                    self.write_indent()
            self.state = self.expect_document_root
        elif isinstance(self.event, StreamEndEvent):
            if self.open_ended:
                self.write_indicator(u"...", True)
                self.write_indent()
            self.write_stream_end()
            self.state = self.expect_nothing
        else:
            raise EmitterError(
                "expected DocumentStartEvent, but got %s" % (self.event,)
            )
    def expect_document_end(self):
        # type: () -> None
        """Write the explicit '...' end marker when requested and flush."""
        if isinstance(self.event, DocumentEndEvent):
            self.write_indent()
            if self.event.explicit:
                self.write_indicator(u"...", True)
                self.write_indent()
            self.flush_stream()
            self.state = self.expect_document_start
        else:
            raise EmitterError("expected DocumentEndEvent, but got %s" % (self.event,))
    def expect_document_root(self):
        # type: () -> None
        """Emit the document's single root node."""
        self.states.append(self.expect_document_end)
        self.expect_node(root=True)
# Node handlers.
    def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
        # type: (bool, bool, bool, bool) -> None
        """Dispatch on the current event to emit an alias, scalar, sequence
        or mapping; the keyword flags describe the surrounding context."""
        self.root_context = root
        self.sequence_context = sequence  # not used in PyYAML
        self.mapping_context = mapping
        self.simple_key_context = simple_key
        if isinstance(self.event, AliasEvent):
            self.expect_alias()
        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
            if (
                self.process_anchor(u"&")
                and isinstance(self.event, ScalarEvent)
                and self.sequence_context
            ):
                self.sequence_context = False
            if (
                root
                and isinstance(self.event, ScalarEvent)
                and not self.scalar_after_indicator
            ):
                self.write_indent()
            self.process_tag()
            if isinstance(self.event, ScalarEvent):
                # nprint('@', self.indention, self.no_newline, self.column)
                self.expect_scalar()
            elif isinstance(self.event, SequenceStartEvent):
                # nprint('@', self.indention, self.no_newline, self.column)
                i2, n2 = self.indention, self.no_newline  # NOQA
                if self.event.comment:
                    # write any attached comments before deciding flow/block
                    if self.event.flow_style is False and self.event.comment:
                        if self.write_post_comment(self.event):
                            self.indention = False
                            self.no_newline = True
                    if self.write_pre_comment(self.event):
                        self.indention = i2
                        self.no_newline = not self.indention
                if (
                    self.flow_level
                    or self.canonical
                    or self.event.flow_style
                    or self.check_empty_sequence()
                ):
                    self.expect_flow_sequence()
                else:
                    self.expect_block_sequence()
            elif isinstance(self.event, MappingStartEvent):
                if self.event.flow_style is False and self.event.comment:
                    self.write_post_comment(self.event)
                if self.event.comment and self.event.comment[1]:
                    self.write_pre_comment(self.event)
                if (
                    self.flow_level
                    or self.canonical
                    or self.event.flow_style
                    or self.check_empty_mapping()
                ):
                    self.expect_flow_mapping(single=self.event.nr_items == 1)
                else:
                    self.expect_block_mapping()
        else:
            raise EmitterError("expected NodeEvent, but got %s" % (self.event,))
    def expect_alias(self):
        # type: () -> None
        """Write an alias reference ('*anchor'); an anchor is mandatory here."""
        if self.event.anchor is None:
            raise EmitterError("anchor is not specified for alias")
        self.process_anchor(u"*")
        self.state = self.states.pop()
    def expect_scalar(self):
        # type: () -> None
        """Write a scalar within a one-off (flow) indentation level."""
        self.increase_indent(flow=True)
        self.process_scalar()
        self.indent = self.indents.pop()
        self.state = self.states.pop()
# Flow sequence handlers.
    def expect_flow_sequence(self):
        # type: () -> None
        """Open a '[...]' flow sequence, aligning after a block-sequence dash."""
        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
        self.write_indicator(u" " * ind + u"[", True, whitespace=True)
        self.increase_indent(flow=True, sequence=True)
        self.flow_context.append("[")
        self.state = self.expect_first_flow_sequence_item
    def expect_first_flow_sequence_item(self):
        # type: () -> None
        """Emit the first item of a flow sequence, or close it when empty."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == "["
            self.write_indicator(u"]", False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on empty flow sequence
                self.write_post_comment(self.event)
            elif self.flow_level == 0:
                self.write_line_break()
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
    def expect_flow_sequence_item(self):
        # type: () -> None
        """Emit ',' plus the next item, or close the flow sequence."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == "["
            if self.canonical:
                self.write_indicator(u",", False)
                self.write_indent()
            self.write_indicator(u"]", False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on flow sequence
                self.write_post_comment(self.event)
            else:
                self.no_newline = False
            self.state = self.states.pop()
        else:
            self.write_indicator(u",", False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
# Flow mapping handlers.
    def expect_flow_mapping(self, single=False):
        # type: (Optional[bool]) -> None
        """Open a '{...}' flow mapping; a single-entry mapping directly
        inside a flow sequence may omit the braces."""
        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
        map_init = u"{"
        if (
            single
            and self.flow_level
            and self.flow_context[-1] == "["
            and not self.canonical
            and not self.brace_single_entry_mapping_in_flow_sequence
        ):
            # single map item with flow context, no curly braces necessary
            map_init = u""
        self.write_indicator(u" " * ind + map_init, True, whitespace=True)
        self.flow_context.append(map_init)
        self.increase_indent(flow=True, sequence=False)
        self.state = self.expect_first_flow_mapping_key
    def expect_first_flow_mapping_key(self):
        # type: () -> None
        """Emit the first key of a flow mapping, or close it when empty."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == "{"  # empty flow mapping
            self.write_indicator(u"}", False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on empty mapping
                self.write_post_comment(self.event)
            elif self.flow_level == 0:
                self.write_line_break()
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                # explicit '?' key form for complex keys
                self.write_indicator(u"?", True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_key(self):
        # type: () -> None
        """Emit ',' plus the next key, or close the flow mapping."""
        if isinstance(self.event, MappingEndEvent):
            # if self.event.comment and self.event.comment[1]:
            #     self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped in [u"{", u""]
            if self.canonical:
                self.write_indicator(u",", False)
                self.write_indent()
            if popped != u"":
                # only write '}' when '{' was actually emitted
                self.write_indicator(u"}", False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on flow mapping, never reached on empty mappings
                self.write_post_comment(self.event)
            else:
                self.no_newline = False
            self.state = self.states.pop()
        else:
            self.write_indicator(u",", False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator(u"?", True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
    def expect_flow_mapping_simple_value(self):
        # type: () -> None
        """Write ':' after a simple key inside a flow mapping."""
        self.write_indicator(self.prefixed_colon, False)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
    def expect_flow_mapping_value(self):
        # type: () -> None
        """Write ':' after an explicit '? key' inside a flow mapping."""
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.write_indicator(self.prefixed_colon, True)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
# Block sequence handlers.
    def expect_block_sequence(self):
        # type: () -> None
        """Open a block sequence; it is indentless when nested inside a
        mapping value that already provided indentation."""
        if self.mapping_context:
            indentless = not self.indention
        else:
            indentless = False
            if not self.compact_seq_seq and self.column != 0:
                self.write_line_break()
        self.increase_indent(flow=False, sequence=True, indentless=indentless)
        self.state = self.expect_first_block_sequence_item
    def expect_first_block_sequence_item(self):
        # type: () -> Any
        """First item of a block sequence."""
        return self.expect_block_sequence_item(first=True)
    def expect_block_sequence_item(self, first=False):
        # type: (bool) -> None
        """Write '- ' and the next item, or pop state on SequenceEndEvent."""
        if not first and isinstance(self.event, SequenceEndEvent):
            if self.event.comment and self.event.comment[1]:
                # final comments on a block list e.g. empty line
                self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            self.state = self.states.pop()
            self.no_newline = False
        else:
            if self.event.comment and self.event.comment[1]:
                self.write_pre_comment(self.event)
            nonl = self.no_newline if self.column == 0 else False
            self.write_indent()
            ind = self.sequence_dash_offset  # if len(self.indents) > 1 else 0
            self.write_indicator(u" " * ind + u"-", True, indention=True)
            if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
                self.no_newline = True
            self.states.append(self.expect_block_sequence_item)
            self.expect_node(sequence=True)
# Block mapping handlers.
    def expect_block_mapping(self):
        # type: () -> None
        """Open a block mapping at the next indentation level."""
        if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
            self.write_line_break()
        self.increase_indent(flow=False, sequence=False)
        self.state = self.expect_first_block_mapping_key
    def expect_first_block_mapping_key(self):
        # type: () -> None
        """First key of a block mapping."""
        return self.expect_block_mapping_key(first=True)
    def expect_block_mapping_key(self, first=False):
        # type: (Any) -> None
        """Write the next mapping key (simple or explicit '?' form), or pop
        state on MappingEndEvent."""
        if not first and isinstance(self.event, MappingEndEvent):
            if self.event.comment and self.event.comment[1]:
                # final comments from a doc
                self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            if self.event.comment and self.event.comment[1]:
                # final comments from a doc
                self.write_pre_comment(self.event)
            self.write_indent()
            if self.check_simple_key():
                if not isinstance(
                    self.event, (SequenceStartEvent, MappingStartEvent)
                ):  # sequence keys
                    try:
                        if self.event.style == "?":
                            self.write_indicator(u"?", True, indention=True)
                    except AttributeError:  # aliases have no style
                        pass
                self.states.append(self.expect_block_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
                if isinstance(self.event, AliasEvent):
                    self.stream.write(u" ")
            else:
                self.write_indicator(u"?", True, indention=True)
                self.states.append(self.expect_block_mapping_value)
                self.expect_node(mapping=True)
    def expect_block_mapping_simple_value(self):
        # type: () -> None
        """Write ':' after a simple key, optionally aligned at top level."""
        if getattr(self.event, "style", None) != "?":
            # prefix = u''
            if self.indent == 0 and self.top_level_colon_align is not None:
                # write non-prefixed colon
                c = u" " * (self.top_level_colon_align - self.column) + self.colon
            else:
                c = self.prefixed_colon
            self.write_indicator(c, False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
    def expect_block_mapping_value(self):
        # type: () -> None
        """Write ':' on its own line after an explicit '?' key."""
        self.write_indent()
        self.write_indicator(self.prefixed_colon, True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
# type: () -> bool
return (
isinstance(self.event, SequenceStartEvent)
and bool(self.events)
and isinstance(self.events[0], SequenceEndEvent)
)
def check_empty_mapping(self):
# type: () -> bool
return (
isinstance(self.event, MappingStartEvent)
and bool(self.events)
and isinstance(self.events[0], MappingEndEvent)
)
def check_empty_document(self):
# type: () -> bool
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (
isinstance(event, ScalarEvent)
and event.anchor is None
and event.tag is None
and event.implicit
and event.value == ""
)
    def check_simple_key(self):
        # type: () -> bool
        """Decide whether the next node may be written as a simple key:
        short enough and representable on a single line without '?'."""
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if (
            isinstance(self.event, (ScalarEvent, CollectionStartEvent))
            and self.event.tag is not None
        ):
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        return length < self.MAX_SIMPLE_KEY_LENGTH and (
            isinstance(self.event, AliasEvent)
            or (
                isinstance(self.event, SequenceStartEvent)
                and self.event.flow_style is True
            )
            or (
                isinstance(self.event, MappingStartEvent)
                and self.event.flow_style is True
            )
            or (
                isinstance(self.event, ScalarEvent)
                # if there is an explicit style for an empty string, it is a simple key
                and not (self.analysis.empty and self.style and self.style not in "'\"")
                and not self.analysis.multiline
            )
            or self.check_empty_sequence()
            or self.check_empty_mapping()
        )
# Anchor, Tag, and Scalar processors.
    def process_anchor(self, indicator):
        # type: (Any) -> bool
        """Write '&anchor'/'*anchor' for the current event, if it has one;
        returns True when the event carried an anchor."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return False
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator + self.prepared_anchor, True)
            # issue 288
            self.no_newline = False
        self.prepared_anchor = None
        return True
    def process_tag(self):
        # type: () -> None
        """Write the node's tag unless it may stay implicit for the chosen
        scalar style; also substitutes alt_null for empty quoted nulls."""
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            if (
                self.event.value == ""
                and self.style == "'"
                and tag == "tag:yaml.org,2002:null"
                and self.alt_null is not None
            ):
                # re-analyze after substituting the alternative null spelling
                self.event.value = self.alt_null
                self.analysis = None
                self.style = self.choose_scalar_style()
            if (not self.canonical or tag is None) and (
                (self.style == "" and self.event.implicit[0])
                or (self.style != "" and self.event.implicit[1])
            ):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                tag = u"!"
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError("tag is not specified")
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
            if (
                self.sequence_context
                and not self.flow_level
                and isinstance(self.event, ScalarEvent)
            ):
                self.no_newline = True
        self.prepared_tag = None
    def choose_scalar_style(self):
        # type: () -> Any
        """Pick the output style for the current scalar event.

        Returns one of '' (plain), "'" (single), '"' (double), '|' or '>'
        (block), balancing the style requested on the event against what
        ``analyze_scalar`` determined is actually representable.  Double
        quoting is the final fallback, as it can represent anything.
        """
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        if (not self.event.style or self.event.style == "?") and (
            self.event.implicit[0] or not self.event.implicit[2]
        ):
            if not (
                self.simple_key_context
                and (self.analysis.empty or self.analysis.multiline)
            ) and (
                self.flow_level
                and self.analysis.allow_flow_plain
                or (not self.flow_level and self.analysis.allow_block_plain)
            ):
                return ""
            self.analysis.allow_block = True
        if self.event.style and self.event.style in "|>":
            # Block styles only work outside flow collections and simple keys.
            if (
                not self.flow_level
                and not self.simple_key_context
                and self.analysis.allow_block
            ):
                return self.event.style
        if not self.event.style and self.analysis.allow_double_quoted:
            if "'" in self.event.value or "\n" in self.event.value:
                return '"'
        if not self.event.style or self.event.style == "'":
            if self.analysis.allow_single_quoted and not (
                self.simple_key_context and self.analysis.multiline
            ):
                return "'"
        return '"'
    def process_scalar(self):
        # type: () -> None
        """Write the current scalar event using the chosen style, then reset
        the per-scalar analysis/style state and emit any trailing comment."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # Folding across lines is never allowed while emitting a simple key.
        split = not self.simple_key_context
        # if self.analysis.multiline and split \
        #     and (not self.style or self.style in '\'\"'):
        #     self.write_indent()
        # nprint('xx', self.sequence_context, self.flow_level)
        if self.sequence_context and not self.flow_level:
            self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == "'":
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == ">":
            self.write_folded(self.analysis.scalar)
        elif self.style == "|":
            self.write_literal(self.analysis.scalar, self.event.comment)
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
        if self.event.comment:
            self.write_post_comment(self.event)
# Analyzers.
def prepare_version(self, version):
# type: (Any) -> Any
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return u"%d.%d" % (major, minor)
def prepare_tag_handle(self, handle):
# type: (Any) -> Any
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != u"!" or handle[-1] != u"!":
raise EmitterError(
"tag handle must start and end with '!': %r" % (utf8(handle))
)
for ch in handle[1:-1]:
if not (
u"0" <= ch <= u"9"
or u"A" <= ch <= u"Z"
or u"a" <= ch <= u"z"
or ch in u"-_"
):
raise EmitterError(
"invalid character %r in the tag handle: %r"
% (utf8(ch), utf8(handle))
)
return handle
    def prepare_tag_prefix(self, prefix):
        # type: (Any) -> Any
        """Return *prefix* with characters outside the allowed URI set
        percent-escaped (based on their UTF-8 bytes).

        A leading '!' is passed through untouched; '#' is only allowed
        verbatim when the dumper targets YAML >= 1.2.
        """
        if not prefix:
            raise EmitterError("tag prefix must not be empty")
        chunks = []  # type: List[Any]
        # start..end delimits the current run of characters that need no
        # escaping; it is flushed whenever an escape is emitted.
        start = end = 0
        if prefix[0] == u"!":
            end = 1
        ch_set = u"-;/?:@&=+$,_.~*'()[]"
        if self.dumper:
            version = getattr(self.dumper, "version", (1, 2))
            if version is None or version >= (1, 2):
                ch_set += u"#"
        while end < len(prefix):
            ch = prefix[end]
            if (
                u"0" <= ch <= u"9"
                or u"A" <= ch <= u"Z"
                or u"a" <= ch <= u"z"
                or ch in ch_set
            ):
                end += 1
            else:
                # Flush the clean run, then emit %XX escapes for each UTF-8
                # byte of the disallowed character.
                if start < end:
                    chunks.append(prefix[start:end])
                start = end = end + 1
                data = utf8(ch)
                for ch in data:
                    chunks.append(u"%%%02X" % ord(ch))
        if start < end:
            chunks.append(prefix[start:end])
        return "".join(chunks)
    def prepare_tag(self, tag):
        # type: (Any) -> Any
        """Render *tag* for output.

        If a registered tag prefix matches, the tag is shortened to
        ``<handle><suffix>``; otherwise the verbatim ``!<...>`` form is
        used.  Characters outside the allowed set are percent-escaped.
        """
        if not tag:
            raise EmitterError("tag must not be empty")
        if tag == u"!":
            return tag
        handle = None
        suffix = tag
        prefixes = sorted(self.tag_prefixes.keys())
        for prefix in prefixes:
            # NOTE(review): there is no break here, so when several prefixes
            # match, the lexicographically largest one wins — confirm this
            # is intended before restructuring.
            if tag.startswith(prefix) and (prefix == u"!" or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix) :]
        chunks = []  # type: List[Any]
        # start..end delimits the current run needing no escaping.
        start = end = 0
        ch_set = u"-;/?:@&=+$,_.~*'()[]"
        if self.dumper:
            version = getattr(self.dumper, "version", (1, 2))
            if version is None or version >= (1, 2):
                ch_set += u"#"
        while end < len(suffix):
            ch = suffix[end]
            if (
                u"0" <= ch <= u"9"
                or u"A" <= ch <= u"Z"
                or u"a" <= ch <= u"z"
                or ch in ch_set
                or (ch == u"!" and handle != u"!")
            ):
                end += 1
            else:
                # Flush the clean run, then %XX-escape the UTF-8 bytes of
                # the disallowed character.
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end + 1
                data = utf8(ch)
                for ch in data:
                    chunks.append(u"%%%02X" % ord(ch))
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = "".join(chunks)
        if handle:
            return u"%s%s" % (handle, suffix_text)
        else:
            return u"!<%s>" % suffix_text
def prepare_anchor(self, anchor):
# type: (Any) -> Any
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not check_anchorname_char(ch):
raise EmitterError(
"invalid character %r in the anchor: %r" % (utf8(ch), utf8(anchor))
)
return anchor
    def analyze_scalar(self, scalar):
        # type: (Any) -> Any
        """Scan *scalar* once, character by character, and return a
        ScalarAnalysis recording which output styles (plain, single/double
        quoted, block) can represent it."""
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(
                scalar=scalar,
                empty=True,
                multiline=False,
                allow_flow_plain=False,
                allow_block_plain=True,
                allow_single_quoted=True,
                allow_double_quoted=True,
                allow_block=False,
            )
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False
        # Check document indicators.
        if scalar.startswith(u"---") or scalar.startswith(u"..."):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceeded_by_whitespace = True
        # Last character or followed by a whitespace.
        followed_by_whitespace = (
            len(scalar) == 1 or scalar[1] in u"\0 \t\r\n\x85\u2028\u2029"
        )
        # The previous character is a space.
        previous_space = False
        # The previous character is a break.
        previous_break = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in u"#,[]{}&*!|>'\"%@`":
                    flow_indicators = True
                    block_indicators = True
                if ch in u"?:":  # ToDo
                    if self.serializer.use_version == (1, 1):
                        flow_indicators = True
                    elif len(scalar) == 1:  # single character
                        flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == u"-" and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in u",[]{}":  # http://yaml.org/spec/1.2/spec.html#id2788859
                    flow_indicators = True
                if ch == u"?" and self.serializer.use_version == (1, 1):
                    flow_indicators = True
                if ch == u":":
                    if followed_by_whitespace:
                        flow_indicators = True
                        block_indicators = True
                if ch == u"#" and preceeded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in u"\n\x85\u2028\u2029":
                line_breaks = True
            if not (ch == u"\n" or u"\x20" <= ch <= u"\x7E"):
                if (
                    ch == u"\x85"
                    or u"\xA0" <= ch <= u"\uD7FF"
                    or u"\uE000" <= ch <= u"\uFFFD"
                    or (
                        self.unicode_supplementary
                        and (u"\U00010000" <= ch <= u"\U0010FFFF")
                    )
                ) and ch != u"\uFEFF":
                    # unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Detect important whitespace combinations.
            if ch == u" ":
                if index == 0:
                    leading_space = True
                if index == len(scalar) - 1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in u"\n\x85\u2028\u2029":
                if index == 0:
                    leading_break = True
                if index == len(scalar) - 1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False
            # Prepare for the next character.
            index += 1
            preceeded_by_whitespace = ch in u"\0 \t\r\n\x85\u2028\u2029"
            followed_by_whitespace = (
                index + 1 >= len(scalar)
                or scalar[index + 1] in u"\0 \t\r\n\x85\u2028\u2029"
            )
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespaces are bad for plain scalars.
        if leading_space or leading_break or trailing_space or trailing_break:
            allow_flow_plain = allow_block_plain = False
        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False
        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if special_characters:
            allow_flow_plain = (
                allow_block_plain
            ) = allow_single_quoted = allow_block = False
        elif space_break:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
            if not self.allow_space_break:
                allow_block = False
        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(
            scalar=scalar,
            empty=False,
            multiline=line_breaks,
            allow_flow_plain=allow_flow_plain,
            allow_block_plain=allow_block_plain,
            allow_single_quoted=allow_single_quoted,
            allow_double_quoted=allow_double_quoted,
            allow_block=allow_block,
        )
# Writers.
def flush_stream(self):
# type: () -> None
if hasattr(self.stream, "flush"):
self.stream.flush()
def write_stream_start(self):
# type: () -> None
# Write BOM if needed.
if self.encoding and self.encoding.startswith("utf-16"):
self.stream.write(u"\uFEFF".encode(self.encoding))
def write_stream_end(self):
# type: () -> None
self.flush_stream()
def write_indicator(
self, indicator, need_whitespace, whitespace=False, indention=False
):
# type: (Any, Any, bool, bool) -> None
if self.whitespace or not need_whitespace:
data = indicator
else:
data = u" " + indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
    def write_indent(self):
        # type: () -> None
        """Start a fresh line (unless a pending ``no_newline`` request
        suppresses it) and pad with spaces up to the current indent level."""
        indent = self.indent or 0
        if (
            not self.indention
            or self.column > indent
            or (self.column == indent and not self.whitespace)
        ):
            if bool(self.no_newline):
                # A single pending newline-suppression request swallows
                # exactly one line break.
                self.no_newline = False
            else:
                self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = u" " * (indent - self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
def write_line_break(self, data=None):
# type: (Any) -> None
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
# type: (Any) -> None
data = u"%%YAML %s" % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
# type: (Any, Any) -> None
data = u"%%TAG %s %s" % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Write *text* as a single-quoted scalar.

        Embedded quotes are doubled (''), and long runs of spaces or line
        breaks are folded across output lines when *split* is set.
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator(u"'", True)
        # spaces/breaks record whether the run ending at `end` consists of
        # spaces or line breaks; start..end is the pending run to flush.
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch is None or ch != u" ":
                    if (
                        start + 1 == end
                        and self.column > self.best_width
                        and split
                        and start != 0
                        and end != len(text)
                    ):
                        # Fold: replace a single interior space with a break.
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch is None or ch not in u"\n\x85\u2028\u2029":
                    if text[start] == u"\n":
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u"\n":
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                if ch is None or ch in u" \n\x85\u2028\u2029" or ch == u"'":
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
                    if ch == u"'":
                        # A quote inside a single-quoted scalar is doubled.
                        data = u"''"
                        self.column += 2
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end + 1
            if ch is not None:
                spaces = ch == u" "
                breaks = ch in u"\n\x85\u2028\u2029"
            end += 1
        self.write_indicator(u"'", False)
ESCAPE_REPLACEMENTS = {
u"\0": u"0",
u"\x07": u"a",
u"\x08": u"b",
u"\x09": u"t",
u"\x0A": u"n",
u"\x0B": u"v",
u"\x0C": u"f",
u"\x0D": u"r",
u"\x1B": u"e",
u'"': u'"',
u"\\": u"\\",
u"\x85": u"N",
u"\xA0": u"_",
u"\u2028": u"L",
u"\u2029": u"P",
}
    def write_double_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Write *text* as a double-quoted scalar.

        Characters outside the printable/allowed range are backslash-escaped
        (short escapes, \\xXX, \\uXXXX or \\UXXXXXXXX); long lines are split
        with a trailing '\\' when *split* is set.
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator(u'"', True)
        # start..end delimits the pending run of characters that need no
        # escaping; it is flushed before each escape or line split.
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if (
                ch is None
                or ch in u'"\\\x85\u2028\u2029\uFEFF'
                or not (
                    u"\x20" <= ch <= u"\x7E"
                    or (
                        self.allow_unicode
                        and (u"\xA0" <= ch <= u"\uD7FF" or u"\uE000" <= ch <= u"\uFFFD")
                    )
                )
            ):
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    # Pick the shortest escape that can represent ch.
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = u"\\" + self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= u"\xFF":
                        data = u"\\x%02X" % ord(ch)
                    elif ch <= u"\uFFFF":
                        data = u"\\u%04X" % ord(ch)
                    else:
                        data = u"\\U%08X" % ord(ch)
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            if (
                0 < end < len(text) - 1
                and (ch == u" " or start >= end)
                and self.column + (end - start) > self.best_width
                and split
            ):
                # Split the line: emit the pending run plus a continuation
                # backslash, then re-indent.
                data = text[start:end] + u"\\"
                if start < end:
                    start = end
                self.column += len(data)
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == u" ":
                    # Protect a leading space on the continuation line.
                    data = u"\\"
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator(u'"', False)
    def determine_block_hints(self, text):
        # type: (Any) -> Any
        """Compute the header hints for a block scalar.

        Returns ``(hints, indent, indicator)`` where *hints* is the text to
        append to '|' or '>': an explicit indentation digit when the scalar
        starts with whitespace (or, at the root, contains document markers),
        plus a chomping indicator ('-' strip or '+' keep) derived from the
        trailing line breaks.
        """
        indent = 0
        indicator = u""
        hints = u""
        if text:
            if text[0] in u" \n\x85\u2028\u2029":
                indent = self.best_sequence_indent
                hints += text_type(indent)
            elif self.root_context:
                # An embedded "\n---" / "\n..." would be read back as a
                # document marker; force an explicit indent in that case.
                for end in ["\n---", "\n..."]:
                    pos = 0
                    while True:
                        pos = text.find(end, pos)
                        if pos == -1:
                            break
                        try:
                            if text[pos + 4] in " \r\n":
                                break
                        except IndexError:
                            pass
                        pos += 1
                    if pos > -1:
                        break
                if pos > 0:
                    indent = self.best_sequence_indent
            if text[-1] not in u"\n\x85\u2028\u2029":
                indicator = u"-"
            elif len(text) == 1 or text[-2] in u"\n\x85\u2028\u2029":
                indicator = u"+"
        hints += indicator
        return hints, indent, indicator
    def write_folded(self, text):
        # type: (Any) -> None
        """Write *text* as a folded ('>') block scalar.

        The internal '\\a' marker requests an explicit fold point and is
        consumed here; long space runs are re-folded to stay within
        ``best_width`` where possible.
        """
        hints, _indent, _indicator = self.determine_block_hints(text)
        self.write_indicator(u">" + hints, True)
        if _indicator == u"+":
            self.open_ended = True
        self.write_line_break()
        leading_space = True
        # spaces/breaks classify the run ending at `end`; start..end is the
        # pending run awaiting output.
        spaces = False
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                if ch is None or ch not in u"\n\x85\u2028\u2029\a":
                    if (
                        not leading_space
                        and ch is not None
                        and ch != u" "
                        and text[start] == u"\n"
                    ):
                        self.write_line_break()
                    leading_space = ch == u" "
                    for br in text[start:end]:
                        if br == u"\n":
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                if ch != u" ":
                    if start + 1 == end and self.column > self.best_width:
                        # Fold: replace a single space with a line break.
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                if ch is None or ch in u" \n\x85\u2028\u2029\a":
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch == u"\a":
                        if end < (len(text) - 1) and not text[end + 2].isspace():
                            self.write_line_break()
                            self.write_indent()
                            end += 2  # \a and the space that is inserted on the fold
                        else:
                            raise EmitterError(
                                "unexcpected fold indicator \\a before space"
                            )
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in u"\n\x85\u2028\u2029"
                spaces = ch == u" "
            end += 1
    def write_literal(self, text, comment=None):
        # type: (Any, Any) -> None
        """Write *text* as a literal ('|') block scalar, keeping all line
        breaks verbatim; an end-of-line *comment* may follow the header."""
        hints, _indent, _indicator = self.determine_block_hints(text)
        self.write_indicator(u"|" + hints, True)
        try:
            # comment is the event comment structure; [1][0] is the comment
            # token following the block header, when present.
            comment = comment[1][0]
            if comment:
                self.stream.write(comment)
        except (TypeError, IndexError):
            pass
        if _indicator == u"+":
            self.open_ended = True
        self.write_line_break()
        # breaks classifies the run ending at `end`; start..end is pending.
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                if ch is None or ch not in u"\n\x85\u2028\u2029":
                    for br in text[start:end]:
                        if br == u"\n":
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        if self.root_context:
                            # Top-level literal: indent manually so the text
                            # lines up with the explicit indent hint.
                            idnx = self.indent if self.indent is not None else 0
                            self.stream.write(u" " * (_indent + idnx))
                        else:
                            self.write_indent()
                    start = end
            else:
                if ch is None or ch in u"\n\x85\u2028\u2029":
                    data = text[start:end]
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in u"\n\x85\u2028\u2029"
            end += 1
    def write_plain(self, text, split=True):
        # type: (Any, Any) -> None
        """Write *text* as a plain (unquoted) scalar, folding at space runs
        when *split* is set and the current line exceeds ``best_width``."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
            else:
                self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # Separate from whatever was written before with one space.
            data = u" "
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        # spaces/breaks classify the run ending at `end`; start..end is the
        # pending run awaiting output.
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch != u" ":
                    if start + 1 == end and self.column > self.best_width and split:
                        # Fold: replace a single space with a line break.
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch not in u"\n\x85\u2028\u2029":  # type: ignore
                    if text[start] == u"\n":
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u"\n":
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                if ch is None or ch in u" \n\x85\u2028\u2029":
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    try:
                        self.stream.write(data)
                    except:  # NOQA
                        # Dump the offending data for diagnosis, then re-raise.
                        sys.stdout.write(repr(data) + "\n")
                        raise
                    start = end
            if ch is not None:
                spaces = ch == u" "
                breaks = ch in u"\n\x85\u2028\u2029"
            end += 1
    def write_comment(self, comment, pre=False):
        # type: (Any, bool) -> None
        """Write a comment token, padding it toward its original column.

        *pre* comments (those preceding an event) do not get the trailing
        line break written here.
        """
        value = comment.value
        # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
        if not pre and value[-1] == "\n":
            value = value[:-1]
        try:
            # get original column position
            col = comment.start_mark.column
            if comment.value and comment.value.startswith("\n"):
                # never inject extra spaces if the comment starts with a newline
                # and not a real comment (e.g. if you have an empty line following a key-value
                col = self.column
            elif col < self.column + 1:
                # NOTE(review): bare `ValueError` is a no-op expression — it
                # does NOT raise, so the except clause below is never entered
                # from this branch and col keeps its original (small) value.
                # Presumably `raise ValueError` was intended; confirm before
                # changing, as output spacing depends on this path.
                ValueError
        except ValueError:
            col = self.column + 1
        # nprint('post_comment', self.line, self.column, value)
        try:
            # at least one space if the current column >= the start column of the comment
            # but not at the start of a line
            nr_spaces = col - self.column
            if self.column and value.strip() and nr_spaces < 1 and value[0] != "\n":
                nr_spaces = 1
            value = " " * nr_spaces + value
            try:
                if bool(self.encoding):
                    value = value.encode(self.encoding)
            except UnicodeDecodeError:
                pass
            self.stream.write(value)
        except TypeError:
            raise
        if not pre:
            self.write_line_break()
    def write_pre_comment(self, event):
        # type: (Any) -> bool
        """Write the comments that precede *event*.

        Returns False when there are none.  For mapping/sequence start
        events each comment is written only once, tracked via the
        ``pre_done`` marker set on the comment object.
        """
        comments = event.comment[1]
        if comments is None:
            return False
        try:
            start_events = (MappingStartEvent, SequenceStartEvent)
            for comment in comments:
                if isinstance(event, start_events) and getattr(
                    comment, "pre_done", None
                ):
                    continue
                if self.column != 0:
                    self.write_line_break()
                self.write_comment(comment, pre=True)
                if isinstance(event, start_events):
                    comment.pre_done = True
        except TypeError:
            # Diagnostic aid for malformed comment structures, then re-raise.
            sys.stdout.write("eventtt {} {}".format(type(event), event))
            raise
        return True
    def write_post_comment(self, event):
        # type: (Any) -> bool
        """Write the end-of-line comment attached to *event*, if any.

        NOTE(review): the guard inspects ``self.event`` while the value
        written comes from the *event* argument — presumably these are
        always the same object here; confirm before unifying.
        """
        if self.event.comment[0] is None:
            return False
        comment = event.comment[0]
        self.write_comment(comment)
        return True
| mit | 1f5d920fc74fd535c5a81072daad2dea | 36.491945 | 157 | 0.498949 | 4.272572 | false | false | false | false |
crdoconnor/strictyaml | strictyaml/ruamel/composer.py | 1 | 8423 | # coding: utf-8
from __future__ import absolute_import, print_function
import warnings
from strictyaml.ruamel.error import MarkedYAMLError, ReusedAnchorWarning
from strictyaml.ruamel.compat import utf8, nprint, nprintf # NOQA
from strictyaml.ruamel.events import (
StreamStartEvent,
StreamEndEvent,
MappingStartEvent,
MappingEndEvent,
SequenceStartEvent,
SequenceEndEvent,
AliasEvent,
ScalarEvent,
)
from strictyaml.ruamel.nodes import MappingNode, ScalarNode, SequenceNode
if False: # MYPY
from typing import Any, Dict, Optional, List # NOQA
__all__ = ["Composer", "ComposerError"]
class ComposerError(MarkedYAMLError):
    """Raised when the event stream cannot be composed into a node graph."""
    pass
class Composer(object):
    """Turns the parser's event stream into a tree of Node objects,
    resolving tags and recording anchors so aliases can share nodes."""
    def __init__(self, loader=None):
        # type: (Any) -> None
        self.loader = loader
        if self.loader is not None and getattr(self.loader, "_composer", None) is None:
            self.loader._composer = self
        # Maps anchor name -> already composed node (reset per document).
        self.anchors = {}  # type: Dict[Any, Any]
    @property
    def parser(self):
        # type: () -> Any
        # New-style loaders (with a `typ`) create the parser lazily; the
        # bare attribute access triggers that creation.
        if hasattr(self.loader, "typ"):
            self.loader.parser
        return self.loader._parser
    @property
    def resolver(self):
        # type: () -> Any
        # assert self.loader._resolver is not None
        if hasattr(self.loader, "typ"):
            self.loader.resolver
        return self.loader._resolver
    def check_node(self):
        # type: () -> Any
        """Return True when another document is available in the stream."""
        # Drop the STREAM-START event.
        if self.parser.check_event(StreamStartEvent):
            self.parser.get_event()
        # If there are more documents available?
        return not self.parser.check_event(StreamEndEvent)
    def get_node(self):
        # type: () -> Any
        """Compose and return the next document's root node (None at end)."""
        # Get the root node of the next document.
        if not self.parser.check_event(StreamEndEvent):
            return self.compose_document()
    def get_single_node(self):
        # type: () -> Any
        """Compose the only document in the stream.

        Raises ComposerError when the stream holds more than one document.
        """
        # Drop the STREAM-START event.
        self.parser.get_event()
        # Compose a document if the stream is not empty.
        document = None  # type: Any
        if not self.parser.check_event(StreamEndEvent):
            document = self.compose_document()
        # Ensure that the stream contains no more documents.
        if not self.parser.check_event(StreamEndEvent):
            event = self.parser.get_event()
            raise ComposerError(
                "expected a single document in the stream",
                document.start_mark,
                "but found another document",
                event.start_mark,
            )
        # Drop the STREAM-END event.
        self.parser.get_event()
        return document
    def compose_document(self):
        # type: (Any) -> Any
        """Compose one document and reset the per-document anchor table."""
        # Drop the DOCUMENT-START event.
        self.parser.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.parser.get_event()
        self.anchors = {}
        return node
    def compose_node(self, parent, index):
        # type: (Any, Any) -> Any
        """Dispatch on the next event and compose a scalar/sequence/mapping
        node; an alias returns the node previously stored for its anchor."""
        if self.parser.check_event(AliasEvent):
            event = self.parser.get_event()
            alias = event.anchor
            if alias not in self.anchors:
                raise ComposerError(
                    None,
                    None,
                    "found undefined alias %r" % utf8(alias),
                    event.start_mark,
                )
            return self.anchors[alias]
        event = self.parser.peek_event()
        anchor = event.anchor
        if anchor is not None:  # have an anchor
            if anchor in self.anchors:
                # Duplicate anchors only warn (round-trip friendliness):
                # raise ComposerError(
                #     "found duplicate anchor %r; first occurrence"
                #     % utf8(anchor), self.anchors[anchor].start_mark,
                #     "second occurrence", event.start_mark)
                ws = (
                    "\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence "
                    "{}".format(
                        (anchor), self.anchors[anchor].start_mark, event.start_mark
                    )
                )
                warnings.warn(ws, ReusedAnchorWarning)
        self.resolver.descend_resolver(parent, index)
        if self.parser.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.parser.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.parser.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.resolver.ascend_resolver()
        return node
    def compose_scalar_node(self, anchor):
        # type: (Any) -> Any
        """Consume a ScalarEvent and return the resolved ScalarNode."""
        event = self.parser.get_event()
        tag = event.tag
        if tag is None or tag == u"!":
            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(
            tag,
            event.value,
            event.start_mark,
            event.end_mark,
            style=event.style,
            comment=event.comment,
            anchor=anchor,
        )
        if anchor is not None:
            self.anchors[anchor] = node
        return node
    def compose_sequence_node(self, anchor):
        # type: (Any) -> Any
        """Consume SequenceStart..SequenceEnd and return a SequenceNode.

        The node is registered under its anchor *before* the children are
        composed, so recursive aliases work.
        """
        start_event = self.parser.get_event()
        tag = start_event.tag
        if tag is None or tag == u"!":
            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(
            tag,
            [],
            start_event.start_mark,
            None,
            flow_style=start_event.flow_style,
            comment=start_event.comment,
            anchor=anchor,
        )
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.parser.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            if node.comment is not None:
                nprint(
                    "Warning: unexpected end_event commment in sequence "
                    "node {}".format(node.flow_style)
                )
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node
    def compose_mapping_node(self, anchor):
        # type: (Any) -> Any
        """Consume MappingStart..MappingEnd and return a MappingNode.

        The node is registered under its anchor *before* the children are
        composed, so recursive aliases work.  Duplicate keys are not
        rejected here (see the commented-out check below).
        """
        start_event = self.parser.get_event()
        tag = start_event.tag
        if tag is None or tag == u"!":
            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(
            tag,
            [],
            start_event.start_mark,
            None,
            flow_style=start_event.flow_style,
            comment=start_event.comment,
            anchor=anchor,
        )
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.parser.check_event(MappingEndEvent):
            # key_event = self.parser.peek_event()
            item_key = self.compose_node(node, None)
            # if item_key in node.value:
            #     raise ComposerError("while composing a mapping",
            #             start_event.start_mark,
            #             "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            # node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node
    def check_end_doc_comment(self, end_event, node):
        # type: (Any, Any) -> None
        """Attach a pre-comment found on *end_event* to *node* (there is no
        following node to move it to)."""
        if end_event.comment and end_event.comment[1]:
            # pre comments on an end_event, no following to move to
            if node.comment is None:
                node.comment = [None, None]
            # NOTE(review): this compares against ScalarEvent (an event
            # class), so it can never fire for a node — ScalarNode was
            # probably intended; confirm upstream before changing.
            assert not isinstance(node, ScalarEvent)
            # this is a post comment on a mapping node, add as third element
            # in the list
            node.comment.append(end_event.comment[1])
            end_event.comment[1] = None
| mit | 8f3b400bbedb9b8536a6c7ca93cec6dd | 33.662551 | 92 | 0.561795 | 4.132974 | false | false | false | false |
crdoconnor/strictyaml | strictyaml/ruamel/main.py | 1 | 54977 | # coding: utf-8
from __future__ import absolute_import, unicode_literals, print_function
import sys
import os
import warnings
import glob
from importlib import import_module
import strictyaml.ruamel
from strictyaml.ruamel.error import UnsafeLoaderWarning, YAMLError # NOQA
from strictyaml.ruamel.tokens import * # NOQA
from strictyaml.ruamel.events import * # NOQA
from strictyaml.ruamel.nodes import * # NOQA
from strictyaml.ruamel.loader import (
BaseLoader,
SafeLoader,
Loader,
RoundTripLoader,
) # NOQA
from strictyaml.ruamel.dumper import (
BaseDumper,
SafeDumper,
Dumper,
RoundTripDumper,
) # NOQA
from strictyaml.ruamel.compat import StringIO, BytesIO, with_metaclass, PY3, nprint
from strictyaml.ruamel.resolver import VersionedResolver, Resolver # NOQA
from strictyaml.ruamel.representer import (
BaseRepresenter,
SafeRepresenter,
Representer,
RoundTripRepresenter,
)
from strictyaml.ruamel.constructor import (
BaseConstructor,
SafeConstructor,
Constructor,
RoundTripConstructor,
)
from strictyaml.ruamel.loader import Loader as UnsafeLoader
if False: # MYPY
from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA
from strictyaml.ruamel.compat import StreamType, StreamTextType, VersionType # NOQA
if PY3:
from pathlib import Path
else:
Path = Any
try:
from _ruamel_yaml import CParser, CEmitter # type: ignore
except: # NOQA
CParser = CEmitter = None
# import io
# Sentinel used to force keyword-only arguments on Python 2
# (see YAML.__init__, which rejects any positional argument).
enforce = object()
# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
# subset of abbreviations, which should be all caps according to PEP8
class YAML(object):
    def __init__(
        self,
        _kw=enforce,
        typ=None,
        pure=False,
        output=None,
        plug_ins=None,  # input=None,
    ):
        # type: (Any, Optional[Text], Any, Any, Any) -> None
        """
        _kw: not used, forces keyword arguments in 2.7 (in 3 you can do (*, safe_load=..)
        typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default)
             'safe'    -> SafeLoader/SafeDumper,
             'unsafe'  -> normal/unsafe Loader/Dumper
             'base'    -> baseloader
        pure: if True only use Python modules
        input/output: needed to work as context manager
        plug_ins: a list of plug-in files
        """
        if _kw is not enforce:
            raise TypeError(
                "{}.__init__() takes no positional argument but at least "
                "one was given ({!r})".format(self.__class__.__name__, _kw)
            )
        self.typ = ["rt"] if typ is None else (typ if isinstance(typ, list) else [typ])
        self.pure = pure
        # self._input = input
        self._output = output
        self._context_manager = None  # type: Any
        # Load plug-in modules (user supplied plus any officially installed).
        self.plug_ins = []  # type: List[Any]
        for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
            file_name = pu.replace(os.sep, ".")
            self.plug_ins.append(import_module(file_name))
        self.Resolver = strictyaml.ruamel.resolver.VersionedResolver  # type: Any
        self.allow_unicode = True
        self.Reader = None  # type: Any
        self.Representer = None  # type: Any
        self.Constructor = None  # type: Any
        self.Scanner = None  # type: Any
        self.Serializer = None  # type: Any
        self.default_flow_style = None  # type: Any
        # Select the component classes for the requested typ; the C
        # implementations are used unless `pure` is set or unavailable.
        typ_found = 1
        setup_rt = False
        if "rt" in self.typ:
            setup_rt = True
        elif "safe" in self.typ:
            self.Emitter = (
                strictyaml.ruamel.emitter.Emitter
                if pure or CEmitter is None
                else CEmitter
            )
            self.Representer = strictyaml.ruamel.representer.SafeRepresenter
            self.Parser = (
                strictyaml.ruamel.parser.Parser if pure or CParser is None else CParser
            )
            self.Composer = strictyaml.ruamel.composer.Composer
            self.Constructor = strictyaml.ruamel.constructor.SafeConstructor
        elif "base" in self.typ:
            self.Emitter = strictyaml.ruamel.emitter.Emitter
            self.Representer = strictyaml.ruamel.representer.BaseRepresenter
            self.Parser = (
                strictyaml.ruamel.parser.Parser if pure or CParser is None else CParser
            )
            self.Composer = strictyaml.ruamel.composer.Composer
            self.Constructor = strictyaml.ruamel.constructor.BaseConstructor
        elif "unsafe" in self.typ:
            self.Emitter = (
                strictyaml.ruamel.emitter.Emitter
                if pure or CEmitter is None
                else CEmitter
            )
            self.Representer = strictyaml.ruamel.representer.Representer
            self.Parser = (
                strictyaml.ruamel.parser.Parser if pure or CParser is None else CParser
            )
            self.Composer = strictyaml.ruamel.composer.Composer
            self.Constructor = strictyaml.ruamel.constructor.Constructor
        else:
            # Unknown typ: fall back to round-trip components and let a
            # plug-in claim the typ below (typ_found reset to 0).
            setup_rt = True
            typ_found = 0
        if setup_rt:
            self.default_flow_style = False
            # no optimized rt-dumper yet
            self.Emitter = strictyaml.ruamel.emitter.Emitter
            self.Serializer = strictyaml.ruamel.serializer.Serializer
            self.Representer = strictyaml.ruamel.representer.RoundTripRepresenter
            self.Scanner = strictyaml.ruamel.scanner.RoundTripScanner
            # no optimized rt-parser yet
            self.Parser = strictyaml.ruamel.parser.RoundTripParser
            self.Composer = strictyaml.ruamel.composer.Composer
            self.Constructor = strictyaml.ruamel.constructor.RoundTripConstructor
        del setup_rt
        # Output-formatting defaults (None generally means "use built-in").
        self.stream = None
        self.canonical = None
        self.old_indent = None
        self.width = None
        self.line_break = None
        self.map_indent = None
        self.sequence_indent = None
        self.sequence_dash_offset = 0
        self.compact_seq_seq = None
        self.compact_seq_map = None
        self.sort_base_mapping_type_on_output = None  # default: sort
        self.top_level_colon_align = None
        self.prefix_colon = None
        self.version = None
        self.preserve_quotes = None
        self.allow_duplicate_keys = False  # duplicate keys in map, set
        self.encoding = "utf-8"
        self.explicit_start = None
        self.explicit_end = None
        self.tags = None
        self.default_style = None
        self.top_level_block_style_scalar_no_indent_error_1_1 = False
        # directives end indicator with single scalar document
        self.scalar_after_indicator = None
        # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
        self.brace_single_entry_mapping_in_flow_sequence = False
        # Give plug-ins a chance to claim (and initialise) the typ.
        for module in self.plug_ins:
            if getattr(module, "typ", None) in self.typ:
                typ_found += 1
                module.init_typ(self)
                break
        if typ_found == 0:
            # NOTE(review): runtime string is missing a space after "{}" —
            # kept byte-identical here as callers/tests may match on it.
            raise NotImplementedError(
                'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
            )
@property
def reader(self):
# type: () -> Any
try:
return self._reader # type: ignore
except AttributeError:
self._reader = self.Reader(None, loader=self)
return self._reader
@property
def scanner(self):
# type: () -> Any
try:
return self._scanner # type: ignore
except AttributeError:
self._scanner = self.Scanner(loader=self)
return self._scanner
    @property
    def parser(self):
        # type: () -> Any
        """Lazily create and cache the parser instance.

        A pure-Python Parser can be built immediately; the C parser
        (CParser) needs the input stream at construction time, so ``None``
        is returned until ``_stream`` has been set on this instance.
        """
        attr = "_" + sys._getframe().f_code.co_name  # -> "_parser"
        if not hasattr(self, attr):
            if self.Parser is not CParser:
                setattr(self, attr, self.Parser(loader=self))
            else:
                if getattr(self, "_stream", None) is None:
                    # wait for the stream
                    return None
                else:
                    # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
                    #     # pathlib.Path() instance
                    #     setattr(self, attr, CParser(self._stream))
                    # else:
                    setattr(self, attr, CParser(self._stream))
                    # self._parser = self._composer = self
                    # nprint('scanner', self.loader.scanner)
        return getattr(self, attr)
@property
def composer(self):
# type: () -> Any
attr = "_" + sys._getframe().f_code.co_name
if not hasattr(self, attr):
setattr(self, attr, self.Composer(loader=self))
return getattr(self, attr)
@property
def constructor(self):
# type: () -> Any
attr = "_" + sys._getframe().f_code.co_name
if not hasattr(self, attr):
cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
cnst.allow_duplicate_keys = self.allow_duplicate_keys
setattr(self, attr, cnst)
return getattr(self, attr)
@property
def resolver(self):
# type: () -> Any
attr = "_" + sys._getframe().f_code.co_name
if not hasattr(self, attr):
setattr(self, attr, self.Resolver(version=self.version, loader=self))
return getattr(self, attr)
    @property
    def emitter(self):
        # type: () -> Any
        """Lazily create and cache the emitter.

        A pure-Python Emitter is configured from this instance's indentation
        and compaction settings.  With CEmitter, construction is deferred
        (``None`` is returned) until an output stream is available.
        """
        attr = "_" + sys._getframe().f_code.co_name  # -> "_emitter"
        if not hasattr(self, attr):
            if self.Emitter is not CEmitter:
                _emitter = self.Emitter(
                    None,
                    canonical=self.canonical,
                    indent=self.old_indent,
                    width=self.width,
                    allow_unicode=self.allow_unicode,
                    line_break=self.line_break,
                    prefix_colon=self.prefix_colon,
                    brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence,  # NOQA
                    dumper=self,
                )
                setattr(self, attr, _emitter)
                # push optional per-instance indentation tweaks into the emitter
                if self.map_indent is not None:
                    _emitter.best_map_indent = self.map_indent
                if self.sequence_indent is not None:
                    _emitter.best_sequence_indent = self.sequence_indent
                if self.sequence_dash_offset is not None:
                    _emitter.sequence_dash_offset = self.sequence_dash_offset
                    # _emitter.block_seq_indent = self.sequence_dash_offset
                if self.compact_seq_seq is not None:
                    _emitter.compact_seq_seq = self.compact_seq_seq
                if self.compact_seq_map is not None:
                    _emitter.compact_seq_map = self.compact_seq_map
            else:
                if getattr(self, "_stream", None) is None:
                    # wait for the stream
                    return None
                return None
        return getattr(self, attr)
    @property
    def serializer(self):
        # type: () -> Any
        """Lazily create and cache the Serializer, configured from this
        instance's encoding, document markers, version and tags."""
        attr = "_" + sys._getframe().f_code.co_name  # -> "_serializer"
        if not hasattr(self, attr):
            setattr(
                self,
                attr,
                self.Serializer(
                    encoding=self.encoding,
                    explicit_start=self.explicit_start,
                    explicit_end=self.explicit_end,
                    version=self.version,
                    tags=self.tags,
                    dumper=self,
                ),
            )
        return getattr(self, attr)
    @property
    def representer(self):
        # type: () -> Any
        """Lazily create and cache the Representer, propagating default
        styles and the mapping-sort policy from this instance."""
        attr = "_" + sys._getframe().f_code.co_name  # -> "_representer"
        if not hasattr(self, attr):
            repres = self.Representer(
                default_style=self.default_style,
                default_flow_style=self.default_flow_style,
                dumper=self,
            )
            if self.sort_base_mapping_type_on_output is not None:
                repres.sort_base_mapping_type_on_output = (
                    self.sort_base_mapping_type_on_output
                )
            setattr(self, attr, repres)
        return getattr(self, attr)
# separate output resolver?
# def load(self, stream=None):
# if self._context_manager:
# if not self._input:
# raise TypeError("Missing input stream while dumping from context manager")
# for data in self._context_manager.load():
# yield data
# return
# if stream is None:
# raise TypeError("Need a stream argument when not loading from context manager")
# return self.load_one(stream)
    def load(self, stream):
        # type: (Union[Path, StreamTextType]) -> Any
        """
        Load and return the single document from *stream*; a pathlib.Path is
        opened in binary mode first.

        at this point you either have the non-pure Parser (which has its own reader and
        scanner) or you have the pure Parser.
        If the pure Parser is set, then set the Reader and Scanner, if not already set.
        If either the Scanner or Reader are set, you cannot use the non-pure Parser,
            so reset it to the pure parser and set the Reader resp. Scanner if necessary
        """
        if not hasattr(stream, "read") and hasattr(stream, "open"):
            # pathlib.Path() instance
            with stream.open("rb") as fp:
                return self.load(fp)
        constructor, parser = self.get_constructor_parser(stream)
        try:
            return constructor.get_single_data()
        finally:
            parser.dispose()
            # drop per-document state so this instance can be reused
            try:
                self._reader.reset_reader()
            except AttributeError:
                pass
            try:
                self._scanner.reset_scanner()
            except AttributeError:
                pass
def load_all(self, stream, _kw=enforce): # , skip=None):
# type: (Union[Path, StreamTextType], Any) -> Any
if _kw is not enforce:
raise TypeError(
"{}.__init__() takes no positional argument but at least "
"one was given ({!r})".format(self.__class__.__name__, _kw)
)
if not hasattr(stream, "read") and hasattr(stream, "open"):
# pathlib.Path() instance
with stream.open("r") as fp:
for d in self.load_all(fp, _kw=enforce):
yield d
return
# if skip is None:
# skip = []
# elif isinstance(skip, int):
# skip = [skip]
constructor, parser = self.get_constructor_parser(stream)
try:
while constructor.check_data():
yield constructor.get_data()
finally:
parser.dispose()
try:
self._reader.reset_reader()
except AttributeError:
pass
try:
self._scanner.reset_scanner()
except AttributeError:
pass
    def get_constructor_parser(self, stream):
        # type: (StreamTextType) -> Any
        """
        the old cyaml needs special setup, and therefore the stream

        Returns a (constructor, parser) pair wired to *stream*.  A custom
        Reader or Scanner forces a fall-back to the pure-Python Parser; the
        combined CParser path builds a throwaway XLoader class instead.
        """
        if self.Parser is not CParser:
            if self.Reader is None:
                self.Reader = strictyaml.ruamel.reader.Reader
            if self.Scanner is None:
                self.Scanner = strictyaml.ruamel.scanner.Scanner
            self.reader.stream = stream
        else:
            if self.Reader is not None:
                # custom Reader set -> cannot use the C parser
                if self.Scanner is None:
                    self.Scanner = strictyaml.ruamel.scanner.Scanner
                self.Parser = strictyaml.ruamel.parser.Parser
                self.reader.stream = stream
            elif self.Scanner is not None:
                # custom Scanner set -> cannot use the C parser
                if self.Reader is None:
                    self.Reader = strictyaml.ruamel.reader.Reader
                self.Parser = strictyaml.ruamel.parser.Parser
                self.reader.stream = stream
            else:
                # combined C level reader>scanner>parser
                # does some calls to the resolver, e.g. BaseResolver.descend_resolver
                # if you just initialise the CParser, to much of resolver.py
                # is actually used
                rslvr = self.Resolver
                # if rslvr is strictyaml.ruamel.resolver.VersionedResolver:
                #     rslvr = strictyaml.ruamel.resolver.Resolver
                class XLoader(self.Parser, self.Constructor, rslvr):  # type: ignore
                    """Throwaway loader combining CParser, Constructor and Resolver."""
                    def __init__(
                        selfx, stream, version=self.version, preserve_quotes=None
                    ):
                        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None  # NOQA
                        CParser.__init__(selfx, stream)
                        selfx._parser = selfx._composer = selfx
                        self.Constructor.__init__(selfx, loader=selfx)
                        selfx.allow_duplicate_keys = self.allow_duplicate_keys
                        rslvr.__init__(selfx, version=version, loadumper=selfx)
                self._stream = stream
                loader = XLoader(stream)
                # the XLoader acts as both constructor and parser
                return loader, loader
        return self.constructor, self.parser
    def dump(self, data, stream=None, _kw=enforce, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        """Dump a single document.

        Inside an active context manager the data is handed to the manager
        (no stream/transform arguments allowed); otherwise delegates to
        dump_all with a one-element list.
        """
        if self._context_manager:
            if not self._output:
                raise TypeError(
                    "Missing output stream while dumping from context manager"
                )
            if _kw is not enforce:
                raise TypeError(
                    "{}.dump() takes one positional argument but at least "
                    "two were given ({!r})".format(self.__class__.__name__, _kw)
                )
            if transform is not None:
                raise TypeError(
                    "{}.dump() in the context manager cannot have transform keyword "
                    "".format(self.__class__.__name__)
                )
            self._context_manager.dump(data)
        else:  # old style
            if stream is None:
                raise TypeError(
                    "Need a stream argument when not dumping from context manager"
                )
            return self.dump_all([data], stream, _kw, transform=transform)
    def dump_all(self, documents, stream, _kw=enforce, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        """Dump every document in *documents* to *stream* by driving a
        temporary YAMLContextManager; *transform* post-processes the text."""
        if self._context_manager:
            # nested use while already inside a context manager is unsupported
            raise NotImplementedError
        if _kw is not enforce:
            raise TypeError(
                "{}.dump(_all) takes two positional argument but at least "
                "three were given ({!r})".format(self.__class__.__name__, _kw)
            )
        self._output = stream
        self._context_manager = YAMLContextManager(self, transform=transform)
        for data in documents:
            self._context_manager.dump(data)
        self._context_manager.teardown_output()
        self._output = None
        self._context_manager = None
    def Xdump_all(self, documents, stream, _kw=enforce, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        """
        Serialize a sequence of Python objects into a YAML stream.

        Alternative ('X') variant of dump_all that drives the serializer
        directly instead of going through YAMLContextManager.
        """
        if not hasattr(stream, "write") and hasattr(stream, "open"):
            # pathlib.Path() instance
            with stream.open("w") as fp:
                return self.dump_all(documents, fp, _kw, transform=transform)
        if _kw is not enforce:
            raise TypeError(
                "{}.dump(_all) takes two positional argument but at least "
                "three were given ({!r})".format(self.__class__.__name__, _kw)
            )
        # The stream should have the methods `write` and possibly `flush`.
        if self.top_level_colon_align is True:
            tlca = max([len(str(x)) for x in documents[0]])  # type: Any
        else:
            tlca = self.top_level_colon_align
        if transform is not None:
            # transform is applied to the fully rendered text, so render
            # into an in-memory buffer and write the real stream at the end
            fstream = stream
            if self.encoding is None:
                stream = StringIO()
            else:
                stream = BytesIO()
        serializer, representer, emitter = self.get_serializer_representer_emitter(
            stream, tlca
        )
        try:
            self.serializer.open()
            for data in documents:
                try:
                    self.representer.represent(data)
                except AttributeError:
                    # nprint(dir(dumper._representer))
                    raise
            self.serializer.close()
        finally:
            try:
                self.emitter.dispose()
            except AttributeError:
                raise
                # self.dumper.dispose()  # cyaml
            # drop cached serializer/emitter so the instance can be reused
            delattr(self, "_serializer")
            delattr(self, "_emitter")
        if transform:
            val = stream.getvalue()
            if self.encoding:
                val = val.decode(self.encoding)
            if fstream is None:
                transform(val)
            else:
                fstream.write(transform(val))
        return None
    def get_serializer_representer_emitter(self, stream, tlca):
        # type: (StreamType, Any) -> Any
        # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
        """Return a (serializer, representer, emitter) triple wired to
        *stream*; *tlca* is the top-level colon alignment width.

        A custom Serializer forces the pure-Python Emitter; otherwise, with
        CEmitter, a throwaway XDumper class combining emitter, representer
        and resolver is built around the stream.
        """
        if self.Emitter is not CEmitter:
            if self.Serializer is None:
                self.Serializer = strictyaml.ruamel.serializer.Serializer
            self.emitter.stream = stream
            self.emitter.top_level_colon_align = tlca
            if self.scalar_after_indicator is not None:
                self.emitter.scalar_after_indicator = self.scalar_after_indicator
            return self.serializer, self.representer, self.emitter
        if self.Serializer is not None:
            # cannot set serializer with CEmitter
            self.Emitter = strictyaml.ruamel.emitter.Emitter
            self.emitter.stream = stream
            self.emitter.top_level_colon_align = tlca
            if self.scalar_after_indicator is not None:
                self.emitter.scalar_after_indicator = self.scalar_after_indicator
            return self.serializer, self.representer, self.emitter
        # C routines
        rslvr = (
            strictyaml.ruamel.resolver.BaseResolver
            if "base" in self.typ
            else strictyaml.ruamel.resolver.Resolver
        )
        class XDumper(CEmitter, self.Representer, rslvr):  # type: ignore
            """Throwaway dumper combining CEmitter, Representer and Resolver."""
            def __init__(
                selfx,
                stream,
                default_style=None,
                default_flow_style=None,
                canonical=None,
                indent=None,
                width=None,
                allow_unicode=None,
                line_break=None,
                encoding=None,
                explicit_start=None,
                explicit_end=None,
                version=None,
                tags=None,
                block_seq_indent=None,
                top_level_colon_align=None,
                prefix_colon=None,
            ):
                # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None  # NOQA
                CEmitter.__init__(
                    selfx,
                    stream,
                    canonical=canonical,
                    indent=indent,
                    width=width,
                    encoding=encoding,
                    allow_unicode=allow_unicode,
                    line_break=line_break,
                    explicit_start=explicit_start,
                    explicit_end=explicit_end,
                    version=version,
                    tags=tags,
                )
                selfx._emitter = selfx._serializer = selfx._representer = selfx
                self.Representer.__init__(
                    selfx,
                    default_style=default_style,
                    default_flow_style=default_flow_style,
                )
                rslvr.__init__(selfx)
        self._stream = stream
        dumper = XDumper(
            stream,
            default_style=self.default_style,
            default_flow_style=self.default_flow_style,
            canonical=self.canonical,
            indent=self.old_indent,
            width=self.width,
            allow_unicode=self.allow_unicode,
            line_break=self.line_break,
            explicit_start=self.explicit_start,
            explicit_end=self.explicit_end,
            version=self.version,
            tags=self.tags,
        )
        self._emitter = self._serializer = dumper
        # the XDumper plays all three roles
        return dumper, dumper, dumper
# basic types
def map(self, **kw):
# type: (Any) -> Any
if "rt" in self.typ:
from strictyaml.ruamel.comments import CommentedMap
return CommentedMap(**kw)
else:
return dict(**kw)
def seq(self, *args):
# type: (Any) -> Any
if "rt" in self.typ:
from strictyaml.ruamel.comments import CommentedSeq
return CommentedSeq(*args)
else:
return list(*args)
# helpers
def official_plug_ins(self):
# type: () -> Any
bd = os.path.dirname(__file__)
gpbd = os.path.dirname(os.path.dirname(bd))
res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + "/*/__plug_in__.py")]
return res
    def register_class(self, cls):
        # type:(Any) -> Any
        """
        register a class for dumping loading
        - if it has attribute yaml_tag use that to register, else use class name
        - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
          as mapping

        Returns *cls* so it can be used as a decorator.
        """
        tag = getattr(cls, "yaml_tag", "!" + cls.__name__)
        try:
            self.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:
            # no to_yaml: fall back to representing the instance attributes
            def t_y(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )
            self.representer.add_representer(cls, t_y)
        try:
            self.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:
            # no from_yaml: fall back to generic object construction
            def f_y(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)
            self.constructor.add_constructor(tag, f_y)
        return cls
    def parse(self, stream):
        # type: (StreamTextType) -> Any
        """
        Parse a YAML stream and produce parsing events.

        Generator; reader/scanner state is reset when it finishes.
        """
        _, parser = self.get_constructor_parser(stream)
        try:
            while parser.check_event():
                yield parser.get_event()
        finally:
            parser.dispose()
            try:
                self._reader.reset_reader()
            except AttributeError:
                pass
            try:
                self._scanner.reset_scanner()
            except AttributeError:
                pass
# ### context manager
    def __enter__(self):
        # type: () -> Any
        """Enter streamed-dumping mode by attaching a YAMLContextManager."""
        self._context_manager = YAMLContextManager(self)
        return self
    def __exit__(self, typ, value, traceback):
        # type: (Any, Any, Any) -> None
        """Finalize output and detach the context manager; exceptions are
        not suppressed (returns None)."""
        if typ:
            nprint("typ", typ)
        self._context_manager.teardown_output()
        # self._context_manager.teardown_input()
        self._context_manager = None
# ### backwards compatibility
def _indent(self, mapping=None, sequence=None, offset=None):
# type: (Any, Any, Any) -> None
if mapping is not None:
self.map_indent = mapping
if sequence is not None:
self.sequence_indent = sequence
if offset is not None:
self.sequence_dash_offset = offset
    @property
    def indent(self):
        # type: () -> Any
        # backwards compatibility: returns the bound _indent method so that
        # yaml.indent(mapping=..., sequence=..., offset=...) keeps working
        return self._indent
    @indent.setter
    def indent(self, val):
        # type: (Any) -> None
        # backwards compatibility: `yaml.indent = n` sets the overall indent
        self.old_indent = val
    @property
    def block_seq_indent(self):
        # type: () -> Any
        # backwards-compatible alias for sequence_dash_offset
        return self.sequence_dash_offset
    @block_seq_indent.setter
    def block_seq_indent(self, val):
        # type: (Any) -> None
        # backwards-compatible alias for sequence_dash_offset
        self.sequence_dash_offset = val
    def compact(self, seq_seq=None, seq_map=None):
        # type: (Any, Any) -> None
        """Set compact emission of nested seq-in-seq / map-in-seq blocks."""
        self.compact_seq_seq = seq_seq
        self.compact_seq_map = seq_map
class YAMLContextManager(object):
    """
    Drives streamed dumping when a YAML instance is used as a context
    manager (or via dump_all): the serializer is initialized lazily on the
    first document, and teardown_output() finalizes the stream, applying
    the optional transform to the fully rendered text.
    """
    def __init__(self, yaml, transform=None):
        # type: (Any, Any) -> None  # used to be: (Any, Optional[Callable]) -> None
        self._yaml = yaml
        self._output_inited = False
        self._output_path = None
        self._output = self._yaml._output
        self._transform = transform
        # self._input_inited = False
        # self._input = input
        # self._input_path = None
        # self._transform = yaml.transform
        # self._fstream = None
        if not hasattr(self._output, "write") and hasattr(self._output, "open"):
            # pathlib.Path() instance, open with the same mode
            self._output_path = self._output
            self._output = self._output_path.open("w")
        # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
        # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
        #    # pathlib.Path() instance, open with the same mode
        #    self._input_path = self._input
        #    self._input = self._input_path.open('r')
        if self._transform is not None:
            # transform operates on complete text: buffer in memory first
            self._fstream = self._output
            if self._yaml.encoding is None:
                self._output = StringIO()
            else:
                self._output = BytesIO()
    def teardown_output(self):
        # type: () -> None
        """Close the serializer, apply the transform (if any) and flush/close
        the real output.  No-op if nothing was ever dumped."""
        if self._output_inited:
            self._yaml.serializer.close()
        else:
            return
        try:
            self._yaml.emitter.dispose()
        except AttributeError:
            raise
            # self.dumper.dispose()  # cyaml
        try:
            # drop cached serializer/emitter so the YAML instance is reusable
            delattr(self._yaml, "_serializer")
            delattr(self._yaml, "_emitter")
        except AttributeError:
            raise
        if self._transform:
            val = self._output.getvalue()
            if self._yaml.encoding:
                val = val.decode(self._yaml.encoding)
            if self._fstream is None:
                self._transform(val)
            else:
                self._fstream.write(self._transform(val))
                self._fstream.flush()
                self._output = self._fstream  # maybe not necessary
        if self._output_path is not None:
            self._output.close()
    def init_output(self, first_data):
        # type: (Any) -> None
        """Wire serializer/representer/emitter to the output stream; the
        first document determines top-level colon alignment width."""
        if self._yaml.top_level_colon_align is True:
            tlca = max([len(str(x)) for x in first_data])  # type: Any
        else:
            tlca = self._yaml.top_level_colon_align
        self._yaml.get_serializer_representer_emitter(self._output, tlca)
        self._yaml.serializer.open()
        self._output_inited = True
    def dump(self, data):
        # type: (Any) -> None
        """Represent one document, initializing the output on first use."""
        if not self._output_inited:
            self.init_output(data)
        try:
            self._yaml.representer.represent(data)
        except AttributeError:
            # nprint(dir(dumper._representer))
            raise
    # def teardown_input(self):
    #     pass
    #
    # def init_input(self):
    #     # set the constructor and parser on YAML() instance
    #     self._yaml.get_constructor_parser(stream)
    #
    # def load(self):
    #     if not self._input_inited:
    #         self.init_input()
    #     try:
    #         while self._yaml.constructor.check_data():
    #             yield self._yaml.constructor.get_data()
    #     finally:
    #         parser.dispose()
    #         try:
    #             self._reader.reset_reader()  # type: ignore
    #         except AttributeError:
    #             pass
    #         try:
    #             self._scanner.reset_scanner()  # type: ignore
    #         except AttributeError:
    #             pass
def yaml_object(yml):
    # type: (Any) -> Any
    """decorator for classes that needs to dump/load objects
    The tag for such objects is taken from the class attribute yaml_tag (or the
    class name in lowercase in case unavailable)
    If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
    loading, default routines (dumping a mapping of the attributes) used otherwise.
    """
    def yo_deco(cls):
        # type: (Any) -> Any
        tag = getattr(cls, "yaml_tag", "!" + cls.__name__)
        try:
            yml.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:
            # no to_yaml: represent the instance attributes as a mapping
            def t_y(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )
            yml.representer.add_representer(cls, t_y)
        try:
            yml.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:
            # no from_yaml: construct a generic object from the node
            def f_y(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)
            yml.constructor.add_constructor(tag, f_y)
        return cls
    return yo_deco
########################################################################################
def scan(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Scan a YAML stream, yielding its scanner tokens one at a time.
    """
    ldr = Loader(stream)
    try:
        scanner = ldr.scanner
        while scanner.check_token():
            yield scanner.get_token()
    finally:
        ldr._parser.dispose()
def parse(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse a YAML stream, yielding its parsing events one at a time.
    """
    ldr = Loader(stream)
    try:
        psr = ldr._parser
        while psr.check_event():
            yield psr.get_event()
    finally:
        ldr._parser.dispose()
def compose(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        loader.dispose()
def compose_all(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse every YAML document in a stream, yielding one representation
    tree (node) per document.
    """
    ldr = Loader(stream)
    try:
        while ldr.check_node():
            yield ldr._composer.get_node()
    finally:
        ldr._parser.dispose()
def load(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (StreamTextType, Any, Optional[VersionType], Any) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    Without an explicit Loader, UnsafeLoader is used and a warning issued.
    """
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
    try:
        return loader._constructor.get_single_data()
    finally:
        loader._parser.dispose()
        # reset reader/scanner state so the loader classes stay reusable
        try:
            loader._reader.reset_reader()
        except AttributeError:
            pass
        try:
            loader._scanner.reset_scanner()
        except AttributeError:
            pass
def load_all(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any  # NOQA
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    Without an explicit Loader, UnsafeLoader is used and a warning issued.
    """
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
    try:
        while loader._constructor.check_data():
            yield loader._constructor.get_data()
    finally:
        loader._parser.dispose()
        # reset reader/scanner state so the loader classes stay reusable
        try:
            loader._reader.reset_reader()
        except AttributeError:
            pass
        try:
            loader._scanner.reset_scanner()
        except AttributeError:
            pass
def safe_load(stream, version=None):
    # type: (StreamTextType, Optional[VersionType]) -> Any
    """Parse the first YAML document in *stream* into a Python object,
    resolving only basic YAML tags (delegates to load with SafeLoader)."""
    return load(stream, Loader=SafeLoader, version=version)
def safe_load_all(stream, version=None):
    # type: (StreamTextType, Optional[VersionType]) -> Any
    """Parse all YAML documents in *stream* into Python objects, resolving
    only basic YAML tags (delegates to load_all with SafeLoader)."""
    return load_all(stream, Loader=SafeLoader, version=version)
def round_trip_load(stream, version=None, preserve_quotes=None):
    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
    """Parse the first YAML document in *stream* with the round-trip loader,
    which keeps comments/quotes for later re-emission."""
    return load(
        stream,
        Loader=RoundTripLoader,
        version=version,
        preserve_quotes=preserve_quotes,
    )
def round_trip_load_all(stream, version=None, preserve_quotes=None):
    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
    """Parse all YAML documents in *stream* with the round-trip loader,
    which keeps comments/quotes for later re-emission."""
    return load_all(
        stream,
        Loader=RoundTripLoader,
        version=version,
        preserve_quotes=preserve_quotes,
    )
def emit(
    events,
    stream=None,
    Dumper=Dumper,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
):
    # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any  # NOQA
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # capture the output in memory and return it at the end
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
    )
    try:
        for event in events:
            dumper.emit(event)
    finally:
        try:
            dumper._emitter.dispose()
        except AttributeError:
            raise
            dumper.dispose()  # cyaml
    if getvalue is not None:
        return getvalue()
enc = None if PY3 else "utf-8"  # default output encoding: text (str) on Py3, UTF-8 bytes on Py2
def serialize_all(
    nodes,
    stream=None,
    Dumper=Dumper,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any  # NOQA
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # capture output in memory; text vs bytes depends on encoding
        if encoding is None:
            stream = StringIO()
        else:
            stream = BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        version=version,
        tags=tags,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
    )
    try:
        dumper._serializer.open()
        for node in nodes:
            dumper.serialize(node)
        dumper._serializer.close()
    finally:
        try:
            dumper._emitter.dispose()
        except AttributeError:
            raise
            dumper.dispose()  # cyaml
    if getvalue is not None:
        return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    # type: (Any, Optional[StreamType], Any, Any) -> Any
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    (Convenience wrapper around serialize_all for a single node.)
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(
    documents,
    stream=None,
    Dumper=Dumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
    top_level_colon_align=None,
    prefix_colon=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str]  # NOQA
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if top_level_colon_align is True:
        # align the colons of the first document's top-level keys
        top_level_colon_align = max([len(str(x)) for x in documents[0]])
    if stream is None:
        # capture output in memory; text vs bytes depends on encoding
        if encoding is None:
            stream = StringIO()
        else:
            stream = BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
    try:
        dumper._serializer.open()
        for data in documents:
            try:
                dumper._representer.represent(data)
            except AttributeError:
                # nprint(dir(dumper._representer))
                raise
        dumper._serializer.close()
    finally:
        try:
            dumper._emitter.dispose()
        except AttributeError:
            raise
            dumper.dispose()  # cyaml
    if getvalue is not None:
        return getvalue()
    return None
def dump(
    data,
    stream=None,
    Dumper=Dumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str]  # NOQA
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.

    default_style ∈ None, '', '"', "'", '|', '>'

    (Convenience wrapper around dump_all for a single document.)
    """
    return dump_all(
        [data],
        stream,
        Dumper=Dumper,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
    )
def safe_dump_all(documents, stream=None, **kwds):
    # type: (Any, Optional[StreamType], Any) -> Optional[str]
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    # delegate to dump_all, pinning the SafeDumper
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
    # type: (Any, Optional[StreamType], Any) -> Optional[str]
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    # delegate to dump_all, pinning the SafeDumper
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def round_trip_dump(
    data,
    stream=None,
    Dumper=RoundTripDumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
    top_level_colon_align=None,
    prefix_colon=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str]  # NOQA
    """Serialize *data* with the round-trip dumper (comment/quote
    preserving).  allow_unicode defaults to True here, unlike dump_all."""
    allow_unicode = True if allow_unicode is None else allow_unicode
    return dump_all(
        [data],
        stream,
        Dumper=Dumper,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
# Loader/Dumper are no longer composites, to get to the associated
# Resolver()/Representer(), etc., you need to instantiate the class
def add_implicit_resolver(
    tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
):
    # type: (Any, Any, Any, Any, Any, Any) -> None
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None and Dumper is None:
        resolver.add_implicit_resolver(tag, regexp, first)
        return
    if Loader:
        if hasattr(Loader, "add_implicit_resolver"):
            Loader.add_implicit_resolver(tag, regexp, first)
        elif issubclass(
            Loader,
            (BaseLoader, SafeLoader, strictyaml.ruamel.loader.Loader, RoundTripLoader),
        ):
            # NOTE(review): registers on the global Resolver class, not the
            # passed-in `resolver` — all known Loader flavours share it.
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
    if Dumper:
        if hasattr(Dumper, "add_implicit_resolver"):
            Dumper.add_implicit_resolver(tag, regexp, first)
        elif issubclass(
            Dumper,
            (BaseDumper, SafeDumper, strictyaml.ruamel.dumper.Dumper, RoundTripDumper),
        ):
            # same global registration on the Dumper side
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
# this code currently not tested
def add_path_resolver(
    tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver
):
    # type: (Any, Any, Any, Any, Any, Any) -> None
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None and Dumper is None:
        resolver.add_path_resolver(tag, path, kind)
        return
    if Loader:
        if hasattr(Loader, "add_path_resolver"):
            Loader.add_path_resolver(tag, path, kind)
        elif issubclass(
            Loader,
            (BaseLoader, SafeLoader, strictyaml.ruamel.loader.Loader, RoundTripLoader),
        ):
            # NOTE(review): registers on the global Resolver class, not the
            # passed-in `resolver` — all known Loader flavours share it.
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
    if Dumper:
        if hasattr(Dumper, "add_path_resolver"):
            Dumper.add_path_resolver(tag, path, kind)
        elif issubclass(
            Dumper,
            (BaseDumper, SafeDumper, strictyaml.ruamel.dumper.Dumper, RoundTripDumper),
        ):
            # same global registration on the Dumper side
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add an object constructor for the given tag.
    object_constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        constructor.add_constructor(tag, object_constructor)
    else:
        if hasattr(Loader, "add_constructor"):
            Loader.add_constructor(tag, object_constructor)
            return
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, strictyaml.ruamel.loader.Loader):
            # BUG FIX: was `issubclass(Loader, Loader)` — the parameter
            # compared against itself, which is always True, making the
            # RoundTripLoader branch below unreachable (matches the correct
            # check in add_multi_constructor).
            Constructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_constructor(tag, object_constructor)
        else:
            raise NotImplementedError
def add_multi_constructor(
    tag_prefix, multi_constructor, Loader=None, constructor=Constructor
):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        constructor.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        # This branch is deliberately disabled via ``if False``; if it is ever
        # enabled it must register ``multi_constructor`` (the previous code
        # wrongly passed the ``constructor`` registry class instead).
        if False and hasattr(Loader, "add_multi_constructor"):
            Loader.add_multi_constructor(tag_prefix, multi_constructor)
            return
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, strictyaml.ruamel.loader.Loader):
            # Fully qualified because the parameter name shadows the class.
            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        else:
            raise NotImplementedError
def add_representer(
    data_type, object_representer, Dumper=None, representer=Representer
):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    object_representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    if Dumper is None:
        representer.add_representer(data_type, object_representer)
    else:
        # Prefer the class's own registration hook when it exists.
        if hasattr(Dumper, "add_representer"):
            Dumper.add_representer(data_type, object_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, strictyaml.ruamel.dumper.Dumper):
            # The ``Dumper`` parameter shadows the imported Dumper class, so the
            # fully-qualified class must be used here; a bare
            # ``issubclass(Dumper, Dumper)`` is trivially True and would swallow
            # the RoundTripDumper branch below.
            Representer.add_representer(data_type, object_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_representer(data_type, object_representer)
        else:
            raise NotImplementedError
# this code currently not tested
def add_multi_representer(
    data_type, multi_representer, Dumper=None, representer=Representer
):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    multi_representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    if Dumper is None:
        representer.add_multi_representer(data_type, multi_representer)
    else:
        # Prefer the class's own registration hook when it exists.
        if hasattr(Dumper, "add_multi_representer"):
            Dumper.add_multi_representer(data_type, multi_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, strictyaml.ruamel.dumper.Dumper):
            # Fully qualified because the parameter name shadows the class;
            # ``issubclass(Dumper, Dumper)`` would be trivially True and hide
            # the RoundTripDumper branch below.
            Representer.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
        else:
            raise NotImplementedError
class YAMLObjectMetaclass(type):
    """Metaclass for YAMLObject.

    When a newly created class declares a non-None ``yaml_tag``, it is
    automatically registered with that class's constructor and representer
    registries, so instances can be loaded from and dumped to YAML.
    """

    def __init__(cls, name, bases, kwds):
        # type: (Any, Any, Any) -> None
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        tag = kwds.get("yaml_tag")
        if tag is not None:
            # Wire up loading and dumping for the freshly created class.
            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Registries that YAMLObjectMetaclass registers subclasses with when they
    # declare a non-None yaml_tag.
    yaml_constructor = Constructor
    yaml_representer = Representer

    yaml_tag = None  # type: Any  # tag identifying this class in a YAML stream
    yaml_flow_style = None  # type: Any  # flow style handed to the representer

    @classmethod
    def from_yaml(cls, constructor, node):
        # type: (Any, Any) -> Any
        """
        Convert a representation node to a Python object.
        """
        return constructor.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, representer, data):
        # type: (Any, Any) -> Any
        """
        Convert a Python object to a representation node.
        """
        return representer.represent_yaml_object(
            cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
        )
crdoconnor/strictyaml | strictyaml/ruamel/loader.py | 1 | 3015 | # coding: utf-8
from __future__ import absolute_import
from strictyaml.ruamel.reader import Reader
from strictyaml.ruamel.scanner import Scanner, RoundTripScanner
from strictyaml.ruamel.parser import Parser, RoundTripParser
from strictyaml.ruamel.composer import Composer
from strictyaml.ruamel.constructor import (
BaseConstructor,
SafeConstructor,
Constructor,
RoundTripConstructor,
)
from strictyaml.ruamel.resolver import VersionedResolver
if False: # MYPY
from typing import Any, Dict, List, Union, Optional # NOQA
from strictyaml.ruamel.compat import StreamTextType, VersionType # NOQA
__all__ = ["BaseLoader", "SafeLoader", "Loader", "RoundTripLoader"]
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
    """Loader that constructs only basic (untyped) objects via BaseConstructor."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # Each mixin keeps a back-reference to this loader so the pipeline
        # stages (read -> scan -> parse -> compose -> construct) can cooperate.
        # preserve_quotes is accepted for interface parity; only
        # RoundTripLoader actually passes it on.
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        BaseConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
    """Loader that constructs only standard YAML types via SafeConstructor."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # Same wiring as BaseLoader, with SafeConstructor as the construct
        # stage; preserve_quotes is accepted for interface parity only.
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        SafeConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
    """Full-featured loader using the (non-safe) Constructor stage."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # Same wiring as BaseLoader, with Constructor as the construct stage;
        # preserve_quotes is accepted for interface parity only.
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        Constructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
class RoundTripLoader(
    Reader,
    RoundTripScanner,
    RoundTripParser,
    Composer,
    RoundTripConstructor,
    VersionedResolver,
):
    """Loader that preserves comments/quoting so documents can round-trip."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # self.reader = Reader.__init__(self, stream)
        # Round-trip variants of the scan/parse/construct stages are used, and
        # preserve_quotes is forwarded so quoting style survives a round trip.
        Reader.__init__(self, stream, loader=self)
        RoundTripScanner.__init__(self, loader=self)
        RoundTripParser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        RoundTripConstructor.__init__(
            self, preserve_quotes=preserve_quotes, loader=self
        )
        VersionedResolver.__init__(self, version, loader=self)
| mit | bb945a9313224d21b3850e17f890a58d | 38.671053 | 88 | 0.678939 | 3.961892 | false | false | false | false |
mnot/redbot | redbot/message/headers/pragma.py | 1 | 1808 | from redbot.message import headers
from redbot.speak import Note, categories, levels
from redbot.syntax import rfc7234
from redbot.type import AddNoteMethodType
class pragma(headers.HttpHeader):
    """Checker for the deprecated `Pragma` header."""

    canonical_name = "Pragma"
    description = """\
The `Pragma` header is used to include implementation-specific directives that might apply to any
recipient along the request/response chain.
This header is deprecated, in favour of `Cache-Control`."""
    reference = f"{rfc7234.SPEC_URL}#header.pragma"
    syntax = rfc7234.Pragma
    list_header = True
    deprecated = True
    valid_in_requests = False
    valid_in_responses = True

    def parse(self, field_value: str, add_note: AddNoteMethodType) -> str:
        # Directives are case-insensitive; normalise for comparison.
        return field_value.lower()

    def evaluate(self, add_note: AddNoteMethodType) -> None:
        if "no-cache" in self.value:
            add_note(PRAGMA_NO_CACHE)
        # Anything other than "no-cache" has no defined meaning.
        if any(directive != "no-cache" for directive in self.value):
            add_note(PRAGMA_OTHER)
class PRAGMA_NO_CACHE(Note):
    # Emitted when a response carries Pragma: no-cache (a request directive).
    category = categories.CACHING
    level = levels.WARN
    summary = "Pragma: no-cache is a request directive, not a response directive."
    text = """\
`Pragma` is a very old request header that is sometimes used as a response header, even though
this is not specified behaviour. `Cache-Control: no-cache` is more appropriate."""
class PRAGMA_OTHER(Note):
    # Emitted when Pragma contains any directive other than "no-cache".
    category = categories.GENERAL
    level = levels.WARN
    summary = """The Pragma header is being used in an undefined way."""
    text = """HTTP only defines `Pragma: no-cache`; other uses of this header are deprecated."""
class PragmaTest(headers.HeaderTest):
    # A bare "no-cache" value parses unchanged but triggers both the
    # no-cache warning and the generic deprecation note.
    name = "Pragma"
    inputs = [b"no-cache"]
    expected_out = ["no-cache"]
    expected_err = [PRAGMA_NO_CACHE, headers.HEADER_DEPRECATED]
| mit | 390095a3f4a566123864c6f6cab077ad | 33.769231 | 97 | 0.695243 | 3.704918 | false | false | false | false |
mnot/redbot | redbot/resource/fetch.py | 1 | 11403 | """
The Resource Expert Droid Fetcher.
RedFetcher fetches a single URI and analyses that response for common
problems and other interesting characteristics. It only makes one request,
based upon the provided headers.
"""
from configparser import SectionProxy
import time
from typing import Any, Dict, List, Tuple, Type, Union
import thor
from thor.http.client import HttpClientExchange
import thor.http.error as httperr
from netaddr import IPAddress # type: ignore
from redbot import __version__
from redbot.speak import Note, levels, categories
from redbot.message import HttpRequest, HttpResponse
from redbot.message.status import StatusChecker
from redbot.message.cache import check_caching
from redbot.type import StrHeaderListType, RawHeaderListType
UA_STRING = f"RED/{__version__} (https://redbot.org/)"
class RedHttpClient(thor.http.HttpClient):
    "Thor HttpClient for RedFetcher"

    def __init__(self, loop: thor.loop.LoopBase = None) -> None:
        thor.http.HttpClient.__init__(self, loop)
        # Timeouts/delay are in seconds.
        self.connect_timeout = 10
        self.read_timeout = 15
        self.retry_delay = 1
        # NOTE(review): presumably relaxes thor's strict protocol handling so
        # non-conformant servers can still be examined — confirm against thor.
        self.careful = False
class RedFetcher(thor.events.EventEmitter):
    """
    Abstract class for a fetcher.
    Fetches the given URI (with the provided method, headers and content) and:
    - emits 'status' and 'debug' as it progresses
    - emits 'fetch_done' when the fetch is finished.
    If provided, 'name' indicates the type of the request, and is used to
    help set notes and status events appropriately.
    """

    check_name = "undefined"
    response_phrase = "undefined"
    # One client shared by all fetchers (class attribute), so that
    # connection handling is pooled across checks.
    client = RedHttpClient()
    client.idle_timeout = 5

    def __init__(self, config: SectionProxy) -> None:
        thor.events.EventEmitter.__init__(self)
        self.config = config
        self.notes: List[Note] = []
        self.transfer_in = 0  # response body bytes received so far
        self.transfer_out = 0  # request payload bytes sent so far
        # Notes raised while parsing the request are discarded; only the
        # response is analysed.
        self.request = HttpRequest(self.ignore_note)
        self.nonfinal_responses: List[HttpResponse] = []
        self.response = HttpResponse(self.add_note)
        self.exchange: HttpClientExchange = None
        self.fetch_started = False
        self.fetch_done = False
        self.setup_check_ip()

    def __getstate__(self) -> Dict[str, Any]:
        # The exchange holds live connection state; drop it when pickling.
        state: Dict[str, Any] = thor.events.EventEmitter.__getstate__(self)
        del state["exchange"]
        return state

    def __repr__(self) -> str:
        out = [self.__class__.__name__]
        if self.request.uri:
            out.append(f"{self.request.uri}")
        if self.fetch_started:
            out.append("fetch_started")
        if self.fetch_done:
            out.append("fetch_done")
        return f"{', '.join(out)} at {id(self):#x}>"

    def add_note(self, subject: str, note: Type[Note], **kw: Union[str, int]) -> None:
        "Set a note."
        # Default the %(response)s template variable for the note text.
        if "response" not in kw:
            kw["response"] = self.response_phrase
        self.notes.append(note(subject, kw))

    @staticmethod
    def ignore_note(subject: str, note: Type[Note], **kw: str) -> None:
        "Ignore a note (for requests)."
        return

    def preflight(self) -> bool:
        """
        Check to see if we should bother running. Return True
        if so; False if not. Can be overridden.
        """
        return True

    def setup_check_ip(self) -> None:
        """
        Check to see if access to this IP is allowed.

        Unless local access is explicitly enabled in the config, install a
        (shared, install-once) filter on the client that refuses private,
        loopback, link-local and non-unicast addresses.
        """
        if (
            not self.config.getboolean("enable_local_access", fallback=False)
        ) and self.client.check_ip is None:

            def check_ip(dns_result: str) -> bool:
                # Return False to block the connection to this address.
                addr = IPAddress(dns_result)
                if (
                    (not addr.is_unicast())
                    or addr.is_private()
                    or addr.is_loopback()
                    or addr.is_link_local()
                ):
                    return False
                return True

            self.client.check_ip = check_ip

    def set_request(
        self,
        iri: str,
        method: str = "GET",
        req_hdrs: StrHeaderListType = None,
        req_body: bytes = None,
    ) -> None:
        """
        Set the resource's request. All values are strings.
        """
        self.request.method = method
        self.response.is_head_response = method == "HEAD"
        try:
            self.request.set_iri(iri)
        except httperr.UrlError as why:
            # A bad URL is recorded on the response so it surfaces as an error.
            self.response.http_error = why
        self.response.base_uri = self.request.uri
        if req_hdrs:
            self.request.set_headers(req_hdrs)
        self.request.payload = req_body
        self.request.complete = True  # cheating a bit

    def check(self) -> None:
        """
        Make an asynchronous HTTP request to uri, emitting 'status' as it's
        updated and 'fetch_done' when it's done. Reason is used to explain what the
        request is in the status callback.
        """
        if not self.preflight() or self.request.uri is None:
            # generally a good sign that we're not going much further.
            self._fetch_done()
            return
        self.fetch_started = True
        # Only add our User-Agent if the caller didn't supply one.
        if "user-agent" not in [i[0].lower() for i in self.request.headers]:
            self.request.headers.append(("User-Agent", UA_STRING))
        self.exchange = self.client.exchange()
        self.exchange.on("response_nonfinal", self._response_nonfinal)
        self.exchange.once("response_start", self._response_start)
        self.exchange.on("response_body", self._response_body)
        self.exchange.once("response_done", self._response_done)
        self.exchange.on("error", self._response_error)
        self.emit("status", f"fetching {self.request.uri} ({self.check_name})")
        self.emit("debug", f"fetching {self.request.uri} ({self.check_name})")
        # Wire headers must be bytes; replace anything non-ASCII.
        req_hdrs = [
            (k.encode("ascii", "replace"), v.encode("ascii", "replace"))
            for (k, v) in self.request.headers
        ]
        self.exchange.request_start(
            self.request.method.encode("ascii"),
            self.request.uri.encode("ascii"),
            req_hdrs,
        )
        self.request.start_time = time.time()
        if not self.fetch_done:  # the request could have immediately failed.
            if self.request.payload is not None:
                self.exchange.request_body(self.request.payload)
                self.transfer_out += len(self.request.payload)
        if not self.fetch_done:  # the request could have immediately failed.
            self.exchange.request_done([])

    def _response_nonfinal(
        self, status: bytes, phrase: bytes, res_headers: RawHeaderListType
    ) -> None:
        "Got a non-final (1xx) response; record it and keep waiting."
        nfres = HttpResponse(self.add_note)
        nfres.process_top_line(self.exchange.res_version, status, phrase)
        nfres.process_raw_headers(res_headers)
        StatusChecker(nfres, self.request)
        self.nonfinal_responses.append(nfres)

    def _response_start(
        self, status: bytes, phrase: bytes, res_headers: RawHeaderListType
    ) -> None:
        "Process the response start-line and headers."
        self.response.start_time = time.time()
        self.response.process_top_line(self.exchange.res_version, status, phrase)
        self.response.process_raw_headers(res_headers)
        # Run the status- and caching-related checks as soon as headers arrive.
        StatusChecker(self.response, self.request)
        check_caching(self.response, self.request)

    def _response_body(self, chunk: bytes) -> None:
        "Process a chunk of the response body."
        self.transfer_in += len(chunk)
        self.response.feed_body(chunk)

    def _response_done(self, trailers: List[Tuple[bytes, bytes]]) -> None:
        "Finish analysing the response, handling any parse errors."
        self.emit("debug", f"fetched {self.request.uri} ({self.check_name})")
        self.response.transfer_length = self.exchange.input_transfer_length
        self.response.header_length = self.exchange.input_header_length
        self.response.body_done(True, trailers)
        self._fetch_done()

    def _response_error(self, error: httperr.HttpError) -> None:
        "Handle an error encountered while fetching the response."
        self.emit(
            "debug",
            f"fetch error {self.request.uri} ({self.check_name}) - {error.desc}",
        )
        err_sample = error.detail[:40] or ""
        # Map well-understood protocol errors to notes; anything else is
        # recorded as a hard HTTP error on the response.
        if isinstance(error, httperr.ExtraDataError):
            if self.response.status_code == "304":
                self.add_note("body", BODY_NOT_ALLOWED, sample=err_sample)
            else:
                self.add_note("body", EXTRA_DATA, sample=err_sample)
        elif isinstance(error, httperr.ChunkError):
            self.add_note(
                "header-transfer-encoding", BAD_CHUNK, chunk_sample=err_sample
            )
        elif isinstance(error, httperr.HeaderSpaceError):
            subject = f"header-{error.detail.lower().strip()}"
            self.add_note(subject, HEADER_NAME_SPACE, header_name=error.detail)
        else:
            self.response.http_error = error
        self._fetch_done()

    def _fetch_done(self) -> None:
        # Idempotent: multiple error/done paths can land here.
        if not self.fetch_done:
            self.fetch_done = True
            self.exchange = None
            self.emit("fetch_done")
class BODY_NOT_ALLOWED(Note):
    # Emitted when a response that must not carry content (e.g. 304) does.
    category = categories.CONNECTION
    level = levels.BAD
    summary = "%(response)s has content."
    text = """\
HTTP defines a few special situations where a response does not allow content. This includes 101,
204 and 304 responses, as well as responses to the `HEAD` method.
%(response)s had data after the headers ended, despite it being disallowed. Clients receiving it
may treat the content as the next response in the connection, leading to interoperability and
security issues.
The extra data started with:
%(sample)s
"""
class EXTRA_DATA(Note):
    # Emitted when data follows the properly-delimited end of the message.
    category = categories.CONNECTION
    level = levels.BAD
    summary = "%(response)s has extra data after it."
    text = """\
The server sent data after the message ended. This can be caused by an incorrect `Content-Length`
header, or by a programming error in the server itself.
The extra data started with:
%(sample)s
"""
class BAD_CHUNK(Note):
    # Emitted when the chunked transfer coding can't be decoded.
    category = categories.CONNECTION
    level = levels.BAD
    summary = "%(response)s has chunked encoding errors."
    text = """\
The response indicates it uses HTTP chunked encoding, but there was a problem decoding the
chunking.
A valid chunk looks something like this:
[chunk-size in hex]\\r\\n[chunk-data]\\r\\n
However, the chunk sent started like this:
%(chunk_sample)s
This is a serious problem, because HTTP uses chunking to delimit one response from the next one;
incorrect chunking can lead to interoperability and security problems.
This issue is often caused by sending an integer chunk size instead of one in hex, or by sending
`Transfer-Encoding: chunked` without actually chunking the response body."""
class HEADER_NAME_SPACE(Note):
    # Emitted for whitespace between a header field name and the colon.
    category = categories.CONNECTION
    level = levels.BAD
    summary = "%(response)s has whitespace at the end of the '%(header_name)s' header field name."
    text = """\
HTTP specifically bans whitespace between header field names and the colon, because they can easily
be confused by recipients; some will strip it, and others won't, leading to a variety of attacks.
Most HTTP implementations will refuse to process this message.
"""
| mit | 7016a7e235ef60a96ac2244e29bbe447 | 35.2 | 99 | 0.633079 | 3.921252 | false | false | false | false |
mnot/redbot | redbot/webui/__init__.py | 1 | 11401 | """
A Web UI for RED, the Resource Expert Droid.
"""
from collections import defaultdict
from configparser import SectionProxy
from functools import partial
import os
import string
import sys
import time
from typing import Any, Callable, Dict, List, Tuple, Union, cast
from urllib.parse import parse_qs, urlsplit, urlencode
import thor
import thor.http.common
from thor.http import get_header
from redbot import __version__
from redbot.message import HttpRequest
from redbot.webui.captcha import CaptchaHandler
from redbot.webui.ratelimit import ratelimiter
from redbot.webui.saved_tests import (
init_save_file,
save_test,
extend_saved_test,
load_saved_test,
)
from redbot.webui.slack import slack_run, slack_auth
from redbot.resource import HttpResource
from redbot.formatter import find_formatter, html, Formatter
from redbot.formatter.html_base import e_url
from redbot.type import (
RawHeaderListType,
StrHeaderListType,
HttpResponseExchange,
)
CSP_VALUE = b"script-src 'self' 'unsafe-inline' https://hcaptcha.com https://*.hcaptcha.com; \
frame-src 'self' https://hcaptcha.com https://*.hcaptcha.com; \
style-src 'self' https://hcaptcha.com https://*.hcaptcha.com; \
connect-src 'self' https://hcaptcha.com https://*.hcaptcha.com;"
class RedWebUi:
    """
    A Web UI for RED.
    Given a URI, run REDbot on it and present the results to output as HTML.
    If descend is true, spider the links and present a summary.
    """

    def __init__(
        self,
        config: SectionProxy,
        method: str,
        query_string: bytes,
        req_headers: RawHeaderListType,
        req_body: bytes,
        exchange: HttpResponseExchange,
        error_log: Callable[[str], int] = sys.stderr.write,
    ) -> None:
        # Parse the incoming request and dispatch to the appropriate handler
        # (run a test, extend/load a saved one, Slack hooks, or default page).
        self.config: SectionProxy = config
        self.charset = self.config["charset"]
        self.charset_bytes = self.charset.encode("ascii")
        self.query_string = parse_qs(query_string.decode(self.charset, "replace"))
        self.req_headers = req_headers
        self.req_body = req_body
        self.body_args = {}
        self.exchange = exchange
        self.error_log = error_log  # function to log errors to
        # query processing
        self.test_uri = self.query_string.get("uri", [""])[0]
        self.test_id = self.query_string.get("id", [None])[0]
        # "name: value" strings become (name, value) tuples; malformed ones
        # (no colon) are dropped.
        self.req_hdrs: StrHeaderListType = [
            tuple(h.split(":", 1))  # type: ignore
            for h in self.query_string.get("req_hdr", [])
            if ":" in h
        ]
        self.format = self.query_string.get("format", ["html"])[0]
        self.descend = "descend" in self.query_string
        self.check_name: str = None
        # check_name selects a single subrequest; ignored when spidering.
        if not self.descend:
            self.check_name = self.query_string.get("check_name", [None])[0]
        self.save_path: str = None
        self.timeout: Any = None
        self.start = time.time()
        if method == "POST":
            req_ct = get_header(self.req_headers, b"content-type")
            if req_ct and req_ct[-1].lower() == b"application/x-www-form-urlencoded":
                self.body_args = parse_qs(req_body.decode(self.charset, "replace"))
            if (
                "save" in self.query_string
                and self.config.get("save_dir", "")
                and self.test_id
            ):
                extend_saved_test(self)
            elif "slack" in self.query_string:
                slack_run(self)
            elif "client_error" in self.query_string:
                self.dump_client_error()
            elif self.test_uri:
                self.run_test()
            else:
                self.show_default()
        elif method in ["GET", "HEAD"]:
            if self.test_id:
                load_saved_test(self)
            elif "code" in self.query_string:
                slack_auth(self)
            else:
                self.show_default()
        else:
            self.error_response(
                find_formatter("html")(self.config, None, self.output),
                b"405",
                b"Method Not Allowed",
                "Method Not Allowed",
            )

    def run_test(self) -> None:
        """Test a URI.

        Performs referer filtering, rate limiting, and (when configured)
        an hCaptcha challenge before handing off to continue_test().
        """
        self.test_id = init_save_file(self)
        top_resource = HttpResource(self.config, descend=self.descend)
        top_resource.set_request(self.test_uri, req_hdrs=self.req_hdrs)
        formatter = find_formatter(self.format, "html", self.descend)(
            self.config,
            top_resource,
            self.output,
            allow_save=self.test_id,
            is_saved=False,
            test_id=self.test_id,
            descend=self.descend,
        )
        continue_test = partial(self.continue_test, top_resource, formatter)
        error_response = partial(self.error_response, formatter)
        # Hard cap on how long a test may run; cancelled when output finishes.
        self.timeout = thor.schedule(
            int(self.config["max_runtime"]),
            self.timeout_error,
            top_resource.show_task_map,
        )
        # referer limiting
        referers = []
        for hdr, value in self.req_hdrs:
            if hdr.lower() == "referer":
                referers.append(value)
        referer_error = None
        if len(referers) > 1:
            referer_error = "Multiple referers not allowed."
        referer_spam_domains = [
            i.strip()
            for i in self.config.get("referer_spam_domains", fallback="").split()
        ]
        if (
            referer_spam_domains
            and referers
            and urlsplit(referers[0]).hostname in referer_spam_domains
        ):
            referer_error = "Referer not allowed."
        if referer_error:
            error_response(b"403", b"Forbidden", referer_error)
            return
        # enforce client limits
        try:
            ratelimiter.process(self, error_response)
        except ValueError:
            return  # over limit, don't continue.
        # hCaptcha; only when both site key and secret are configured.
        if self.config.get("hcaptcha_sitekey", "") and self.config.get(
            "hcaptcha_secret", ""
        ):
            CaptchaHandler(
                self,
                self.get_client_id(),
                continue_test,
                error_response,
            ).run()
        else:
            continue_test()

    def continue_test(
        self,
        top_resource: HttpResource,
        formatter: Formatter,
        extra_headers: RawHeaderListType = None,
    ) -> None:
        "Preliminary checks are done; actually run the test."
        if not extra_headers:
            extra_headers = []

        @thor.events.on(formatter)
        def formatter_done() -> None:
            # Runs when the formatter has emitted all output: finish the HTTP
            # response, persist the test, and log heavy traffic.
            if self.timeout:
                self.timeout.delete()
                self.timeout = None
            self.exchange.response_done([])
            save_test(self, top_resource)
            # log excessive traffic
            ti = sum(
                [i.transfer_in for i, t in top_resource.linked],
                top_resource.transfer_in,
            )
            to = sum(
                [i.transfer_out for i, t in top_resource.linked],
                top_resource.transfer_out,
            )
            if ti + to > int(self.config["log_traffic"]) * 1024:
                self.error_log(
                    f"{self.get_client_id()} "
                    f"{ti / 1024:n}K in "
                    f"{to / 1024:n}K out "
                    f"for <{e_url(self.test_uri)}> "
                    f"(descend {self.descend})"
                )

        self.exchange.response_start(
            b"200",
            b"OK",
            [
                (b"Content-Type", formatter.content_type()),
                (b"Cache-Control", b"max-age=60, must-revalidate"),
                (b"Content-Security-Policy", CSP_VALUE),
            ]
            + extra_headers,
        )
        # When a specific check was requested, display that subrequest
        # instead of the top-level resource.
        if self.check_name:
            display_resource = cast(
                HttpResource, top_resource.subreqs.get(self.check_name, top_resource)
            )
        else:
            display_resource = top_resource
        formatter.bind_resource(display_resource)
        top_resource.check()

    def dump_client_error(self) -> None:
        """Dump a client error reported by in-page JavaScript to the log."""
        # Truncate, strip newlines and non-printables before logging.
        body = self.req_body.decode("ascii", "replace")[:255].replace("\n", "")
        body_safe = "".join([x for x in body if x in string.printable])
        self.error_log(f"{self.get_client_id()} Client JS -> {body_safe}")
        self.exchange.response_start(
            b"204",
            b"No Content",
            [],
        )
        self.exchange.response_done([])

    def show_default(self) -> None:
        """Show the default page."""
        formatter = html.BaseHtmlFormatter(
            self.config, None, self.output, is_blank=self.test_uri == ""
        )
        if self.test_uri:
            top_resource = HttpResource(self.config, descend=self.descend)
            top_resource.set_request(self.test_uri, req_hdrs=self.req_hdrs)
            if self.check_name:
                formatter.resource = cast(
                    HttpResource,
                    top_resource.subreqs.get(self.check_name, top_resource),
                )
            else:
                formatter.resource = top_resource
        self.exchange.response_start(
            b"200",
            b"OK",
            [
                (b"Content-Type", formatter.content_type()),
                (b"Cache-Control", b"max-age=300"),
                (b"Content-Security-Policy", CSP_VALUE),
            ],
        )
        formatter.start_output()
        formatter.finish_output()
        self.exchange.response_done([])

    def error_response(
        self,
        formatter: Formatter,
        status_code: bytes,
        status_phrase: bytes,
        message: str,
        log_message: str = None,
    ) -> None:
        """Send an error response (and optionally log it)."""
        if self.timeout:
            self.timeout.delete()
            self.timeout = None
        self.exchange.response_start(
            status_code,
            status_phrase,
            [
                (b"Content-Type", formatter.content_type()),
                (b"Cache-Control", b"max-age=60, must-revalidate"),
                (b"Content-Security-Policy", CSP_VALUE),
            ],
        )
        formatter.start_output()
        formatter.error_output(message)
        self.exchange.response_done([])
        if log_message:
            self.error_log(f"{self.get_client_id()} {log_message}")

    def output(self, chunk: str) -> None:
        # Formatters write str; encode to the configured charset on the wire.
        self.exchange.response_body(chunk.encode(self.charset, "replace"))

    def timeout_error(self, detail: Callable[[], str] = None) -> None:
        """Max runtime reached."""
        details = ""
        if detail:
            details = f"detail={detail()}"
        self.error_log(
            f"{self.get_client_id()} timeout: <{self.test_uri}> descend={self.descend} {details}"
        )
        self.output("<p class='error'>REDbot timeout.</p>")
        self.exchange.response_done([])

    def get_client_id(self) -> str:
        """
        Figure out an identifier for the client.

        Prefers the last X-Forwarded-For hop; falls back to Client-IP.
        """
        xff = thor.http.common.get_header(self.req_headers, b"x-forwarded-for")
        if xff:
            return str(xff[-1].decode("idna"))
        return str(
            thor.http.common.get_header(self.req_headers, b"client-ip")[-1].decode(
                "idna"
            )
        )
| mit | 3fcf83d161f93841398a0ee72392dbc8 | 32.532353 | 97 | 0.542145 | 3.962808 | false | true | false | false |
mnot/redbot | redbot/message/headers/last_modified.py | 1 | 1133 | from redbot.message import headers
from redbot.syntax import rfc7232
from redbot.type import AddNoteMethodType
class last_modified(headers.HttpHeader):
    canonical_name = "Last-Modified"
    description = """\
The `Last-Modified` header indicates the time that the origin server believes the
representation was last modified."""
    reference = f"{rfc7232.SPEC_URL}#header.last_modified"
    syntax = False  # rfc7232.Last_Modified
    list_header = False
    deprecated = False
    valid_in_requests = False
    valid_in_responses = True

    def parse(self, field_value: str, add_note: AddNoteMethodType) -> int:
        # Returns the parsed date as an epoch timestamp (see BasicLMTest).
        return headers.parse_date(field_value, add_note)
class BasicLMTest(headers.HeaderTest):
    # A valid HTTP-date parses to the corresponding epoch timestamp.
    name = "Last-Modified"
    inputs = [b"Mon, 04 Jul 2011 09:08:06 GMT"]
    expected_out = 1309770486
class BadLMTest(headers.HeaderTest):
    # Non-date input yields no value and a bad-date-syntax note.
    name = "Last-Modified"
    inputs = [b"0"]
    expected_out = None
    expected_err = [headers.BAD_DATE_SYNTAX]
class BlankLMTest(headers.HeaderTest):
    # An empty value is also treated as a date syntax error.
    name = "Last-Modified"
    inputs = [b""]
    expected_out = None
    expected_err = [headers.BAD_DATE_SYNTAX]
| mit | 0734099cffe2bd09d5d965c8e4b34eee | 28.051282 | 81 | 0.70256 | 3.486154 | false | true | false | false |
mnot/redbot | redbot/syntax/rfc7233.py | 1 | 2813 | """
Regex for RFC7233
These regex are directly derived from the collected ABNF in RFC7233.
<http://httpwg.org/specs/rfc7233.html#collected.abnf>
They should be processed with re.VERBOSE.
"""
# pylint: disable=invalid-name
from .rfc5234 import DIGIT, SP, VCHAR
from .rfc7230 import list_rule, token
from .rfc7231 import HTTP_date
from .rfc7232 import entity_tag
SPEC_URL = "http://httpwg.org/specs/rfc7233"

# Each constant below is a VERBOSE-mode regex fragment for the ABNF
# production named in the comment above it; later rules compose earlier ones.

# bytes-unit = "bytes"
bytes_unit = r"bytes"
# other-range-unit = token
other_range_unit = token
# range-unit = bytes-unit / other-range-unit
range_unit = rf"(?: {bytes_unit} | {other_range_unit} )"
# acceptable-ranges = 1#range-unit / "none"
acceptable_ranges = rf"(?: {list_rule(range_unit, 1)} | none )"
# Accept-Ranges = acceptable-ranges
Accept_Ranges = acceptable_ranges
# first-byte-pos = 1*DIGIT
first_byte_pos = rf"{DIGIT}+"
# last-byte-pos = 1*DIGIT
last_byte_pos = rf"{DIGIT}+"
# byte-range = first-byte-pos "-" last-byte-pos
byte_range = rf"(?: {first_byte_pos} \- {last_byte_pos} )"
# complete-length = 1*DIGIT
complete_length = rf"{DIGIT}+"
# byte-range-resp = byte-range "/" ( complete-length / "*" )
byte_range_resp = rf"(?: {byte_range} / (?: {complete_length} | \* ) )"
# unsatisfied-range = "*/" complete-length
unsatisfied_range = rf"(?: \*/ {complete_length} )"
# byte-content-range = bytes-unit SP ( byte-range-resp / unsatisfied-range )
byte_content_range = (
    rf"(?: {bytes_unit} {SP} (?: {byte_range_resp} | {unsatisfied_range} ) )"
)
# other-range-resp = *CHAR
other_range_resp = rf"{VCHAR}*"
# other-content-range = other-range-unit SP other-range-resp
other_content_range = rf"(?: {other_range_unit} {SP} {other_range_resp} )"
# Content-Range = byte-content-range / other-content-range
Content_Range = rf"(?: {byte_content_range} | {other_content_range} )"
# If-Range = entity-tag / HTTP-date
If_Range = rf"(?: {entity_tag} | {HTTP_date} )"
# suffix-length = 1*DIGIT
suffix_length = rf"{DIGIT}+"
# suffix-byte-range-spec = "-" suffix-length
# NOTE(review): the trailing "+" on the next few rules has no counterpart in
# the ABNF quoted above them (and byte-range-spec's last-byte-pos is optional
# in the ABNF but mandatory here) — confirm whether this is intentional.
suffix_byte_range_spec = rf"(?: \- {suffix_length} )+"
# byte-range-spec = first-byte-pos "-" [ last-byte-pos ]
byte_range_spec = rf"(?: {first_byte_pos} \- {last_byte_pos} )+"
# byte-range-set = 1#( byte-range-spec / suffix-byte-range-spec )
byte_range_set = list_rule(rf"(?: {byte_range_spec} | {suffix_byte_range_spec} )", 1)
# byte-ranges-specifier = bytes-unit "=" byte-range-set
byte_ranges_specifier = rf"(?: {bytes_unit} = {byte_range_set} )+"
# other-range-set = 1*VCHAR
other_range_set = rf"{VCHAR}+"
# other-ranges-specifier = other-range-unit "=" other-range-set
other_ranges_specifier = rf"(?: {other_range_unit} = {other_range_set} )+"
# Range = byte-ranges-specifier / other-ranges-specifier
Range = rf"(?: {byte_ranges_specifier} | {other_ranges_specifier} )"
| mit | c72bdfc6420ecef4ddb5a614702feeab | 22.838983 | 85 | 0.661571 | 2.725775 | false | false | false | false |
mnot/redbot | redbot/resource/active_check/base.py | 1 | 3361 | """
Subrequests to do things like range requests, content negotiation checks,
and validation.
This is the base class for all subrequests.
"""
from abc import ABCMeta, abstractmethod
from configparser import SectionProxy
from typing import List, Type, Union, TYPE_CHECKING
from redbot.resource.fetch import RedFetcher
from redbot.speak import Note, levels, categories
from redbot.type import StrHeaderListType
if TYPE_CHECKING:
from redbot.resource import HttpResource # pylint: disable=cyclic-import
class SubRequest(RedFetcher, metaclass=ABCMeta):
    """
    Base class for a subrequest of a "main" HttpResource, made to perform
    additional behavioural tests on the resource.

    Subclasses must implement ``done`` (processing the finished subrequest)
    and ``modify_request_headers`` (deriving the subrequest's headers from
    the base request's headers). The subrequest fires a ``check_done`` event
    once its fetch and processing have finished.
    """

    # Overridden by subclasses; used to identify/describe the check.
    check_name = "undefined"
    response_phrase = "undefined"

    def __init__(self, config: SectionProxy, base_resource: "HttpResource") -> None:
        self.config = config
        self.base: HttpResource = (  # pylint: disable=used-before-assignment
            base_resource
        )
        RedFetcher.__init__(self, config)
        # Becomes True after done() has run (or preflight declined the check).
        self.check_done = False
        # Process results as soon as the underlying fetch completes.
        self.on("fetch_done", self._check_done)

    @abstractmethod
    def done(self) -> None:
        """The subrequest is done, process it. Must be overridden."""
        raise NotImplementedError

    def _check_done(self) -> None:
        # Only run the check's processing if preflight says the fetch was usable.
        if self.preflight():
            self.done()
        self.check_done = True
        self.emit("check_done")

    def check(self) -> None:
        """Start the subrequest: clone the base request with modified headers."""
        modified_headers = self.modify_request_headers(list(self.base.request.headers))
        RedFetcher.set_request(
            self,
            self.base.request.uri,
            self.base.request.method,
            modified_headers,
            self.base.request.payload,
        )
        RedFetcher.check(self)

    @abstractmethod
    def modify_request_headers(
        self, base_headers: StrHeaderListType
    ) -> StrHeaderListType:
        """Usually overridden; modifies the request headers."""
        return base_headers

    def add_base_note(
        self, subject: str, note: Type[Note], **kw: Union[str, int]
    ) -> None:
        "Add a Note to the base resource."
        kw["response"] = self.response_phrase
        self.base.add_note(subject, note, **kw)

    def check_missing_hdrs(self, hdrs: List[str], note: Type[Note]) -> None:
        """
        See if the listed headers are missing in the subrequest; if so,
        set the specified note.
        """
        missing_hdrs = []
        for hdr in hdrs:
            # A header counts as missing only if the base response had it
            # but this subrequest's response does not.
            if (
                hdr in self.base.response.parsed_headers
                and hdr not in self.response.parsed_headers
            ):
                missing_hdrs.append(hdr)
        if missing_hdrs:
            # Note both on the base resource and on this subrequest.
            self.add_base_note("headers", note, missing_hdrs=", ".join(missing_hdrs))
            self.add_note("headers", note, missing_hdrs=", ".join(missing_hdrs))
class MISSING_HDRS_304(Note):
    # Emitted when a 304 Not Modified response omits headers that were present
    # in the corresponding 200 response; caches may then drop those headers.
    category = categories.VALIDATION
    level = levels.WARN
    summary = "%(response)s is missing required headers."
    text = """\
HTTP requires `304 Not Modified` responses to have certain headers, if they are also present in a
normal (e.g., `200 OK` response).

%(response)s is missing the following headers: `%(missing_hdrs)s`.

This can affect cache operation; because the headers are missing, caches might remove them from
their cached copies."""
mnot/redbot | redbot/formatter/text.py | 1 | 6699 | """
HAR Formatter for REDbot.
"""
from html.parser import HTMLParser
import operator
import re
import textwrap
from typing import Any, List
import thor.http.error as httperr
from redbot.formatter import Formatter
from redbot.message import HttpResponse
from redbot.resource import HttpResource
from redbot.speak import Note, levels, categories
NL = "\n"
class BaseTextFormatter(Formatter):
    """
    Base class for text formatters.

    Implements complete plain-text rendering of a REDbot result (headers,
    recommendations grouped by note category, optional verbose note text).
    Subclasses pin down the format name and, for list formats, iterate
    linked subresources.
    """

    media_type = "text/plain"

    # Order in which note categories are rendered in the recommendations.
    note_categories = [
        categories.GENERAL,
        categories.SECURITY,
        categories.CONNECTION,
        categories.CONNEG,
        categories.CACHING,
        categories.VALIDATION,
        categories.RANGE,
    ]

    # (header tag, heading) pairs used by list formatters for linked resources.
    link_order = [
        ("link", "Head Links"),
        ("script", "Script Links"),
        ("frame", "Frame Links"),
        ("iframe", "IFrame Links"),
        ("img", "Image Links"),
    ]

    error_template = "Error: %s\n"

    def __init__(self, *args: Any, **kw: Any) -> None:
        Formatter.__init__(self, *args, **kw)
        self.verbose = False

    def start_output(self) -> None:
        pass

    def feed(self, sample: bytes) -> None:
        pass

    def status(self, status: str) -> None:
        pass

    def finish_output(self) -> None:
        "Fill in the template with RED's results."
        if self.resource.response.complete:
            self.output(
                NL.join(
                    [self.format_headers(r) for r in self.resource.nonfinal_responses]
                )
                + NL
                + NL
            )
            self.output(self.format_headers(self.resource.response) + NL + NL)
            self.output(self.format_recommendations(self.resource) + NL)
        else:
            if self.resource.response.http_error is None:
                pass
            elif isinstance(self.resource.response.http_error, httperr.HttpError):
                self.output(
                    self.error_template % self.resource.response.http_error.desc
                )
            else:
                raise AssertionError("Unknown incomplete response error.")

    def error_output(self, message: str) -> None:
        self.output(self.error_template % message)

    @staticmethod
    def format_headers(response: HttpResponse) -> str:
        """Render the status line and headers of *response* as text."""
        out = [
            f"HTTP/{response.version} {response.status_code} {response.status_phrase}"
        ]
        return NL.join(out + [f"{h[0]}:{h[1]}" for h in response.headers])

    def format_recommendations(self, resource: HttpResource) -> str:
        """Render all note categories for *resource*, in note_categories order."""
        return "".join(
            [
                self.format_recommendation(resource, category)
                for category in self.note_categories
            ]
        )

    def format_recommendation(
        self, resource: HttpResource, category: categories
    ) -> str:
        """Render one category's notes; empty string if there are none."""
        notes = [note for note in resource.notes if note.category == category]
        if not notes:
            return ""
        out = []
        if list(notes):
            out.append(f"* {category.value}:")
        for note in notes:
            out.append(f" * {self.colorize(note.level, note.show_summary('en'))}")
            if self.verbose:
                out.append("")
                out.extend(" " + line for line in self.format_text(note))
                out.append("")
        out.append(NL)
        return NL.join(out)

    @staticmethod
    def format_text(note: Note) -> List[str]:
        """Return a note's long text as wrapped, tag-free lines."""
        return textwrap.wrap(
            strip_tags(re.sub(r"(?m)\s\s+", " ", note.show_text("en")))
        )

    def colorize(self, level: levels, instr: str) -> str:
        """Wrap *instr* in ANSI color codes for *level* when writing to a tty.

        Bug fix: the original code used a chain of independent ``if``
        statements with a trailing ``else`` bound to the last (WARN) check,
        which overwrote the GOOD/BAD colors and rendered every non-WARN
        level blue. This uses a proper if/elif chain, keeping blue for the
        ``None`` level that ``format_uri`` passes.
        """
        if not self.kw.get("tty_out", False):
            return instr
        if level == levels.GOOD:
            color_start = "\033[1;32m"
        elif level == levels.BAD:
            color_start = "\033[1;31m"
        elif level == levels.WARN:
            color_start = "\033[1;33m"
        elif level is None:
            # Not a note level (e.g. URIs from format_uri); render blue.
            color_start = "\033[1;34m"
        else:
            # info
            color_start = "\033[0;32m"
        color_end = "\033[0;39m"
        return color_start + instr + color_end
class TextFormatter(BaseTextFormatter):
    """
    Format a REDbot object as text.

    The base class implements the full rendering; this subclass only pins
    down the format name and media type. (The previous ``__init__`` and
    ``finish_output`` overrides were pure pass-throughs to the base class
    and have been removed — inherited behavior is identical.)
    """

    name = "txt"
    media_type = "text/plain"
class VerboseTextFormatter(TextFormatter):
    # Same as TextFormatter, but each note's full explanatory text is
    # included under its summary (self.verbose is checked by the base class).
    name = "txt_verbose"

    def __init__(self, *args: Any, **kw: Any) -> None:
        TextFormatter.__init__(self, *args, **kw)
        self.verbose = True
class TextListFormatter(BaseTextFormatter):
    """
    Format multiple REDbot responses as a textual list.
    """

    name = "text"
    media_type = "text/plain"
    can_multiple = True

    def __init__(self, *args: Any, **kw: Any) -> None:
        BaseTextFormatter.__init__(self, *args, **kw)

    def finish_output(self) -> None:
        "Fill in the template with RED's results."
        # First render the main resource, then each category of linked
        # subresources (scripts, frames, images, ...) under a separator.
        BaseTextFormatter.finish_output(self)
        sep = "=" * 78
        for hdr_tag, heading in self.link_order:
            # self.resource.linked holds (resource, tag) pairs.
            subresources = [d[0] for d in self.resource.linked if d[1] == hdr_tag]
            self.output(f"{sep}{NL}{heading} ({len(subresources)}){NL}{sep}{NL}")
            if subresources:
                subresources.sort(key=operator.attrgetter("request.uri"))
                for subresource in subresources:
                    self.output(self.format_uri(subresource) + NL + NL)
                    self.output(self.format_headers(subresource.response) + NL + NL)
                    self.output(self.format_recommendations(subresource) + NL + NL)

    def format_uri(self, resource: HttpResource) -> str:
        # None is not a note level; colorize() gives it a distinct color.
        return self.colorize(None, resource.request.uri)
class VerboseTextListFormatter(TextListFormatter):
    # NOTE(review): this name duplicates VerboseTextFormatter.name
    # ("txt_verbose"). If formatters are registered/looked up by name, one of
    # the two shadows the other — confirm whether this should be something
    # like "txt_list_verbose".
    name = "txt_verbose"

    def __init__(self, *args: Any, **kw: Any) -> None:
        TextListFormatter.__init__(self, *args, **kw)
        self.verbose = True
class MLStripper(HTMLParser):
    """An HTMLParser that keeps only text content, discarding all markup.

    Feed it HTML via ``feed()``; retrieve the accumulated plain text with
    ``get_data()``.
    """

    def __init__(self) -> None:
        HTMLParser.__init__(self)
        self.reset()
        # Accumulates every text fragment seen between tags.
        self.fed: List[str] = []

    def handle_data(self, data: str) -> None:
        """Collect a run of character data."""
        self.fed.append(data)

    def get_data(self) -> str:
        """Return all collected text fragments joined together."""
        return "".join(self.fed)

    def error(self, message: str) -> None:
        # Parse errors are deliberately ignored; we only want the text.
        pass
def strip_tags(html: str) -> str:
    """Return *html* with all markup removed, keeping only its text content."""
    parser = MLStripper()
    parser.feed(html)
    return parser.get_data()
| mit | 3a70f904a1ed0cbb7d43bb633f6206f5 | 28.773333 | 86 | 0.559039 | 3.828 | false | false | false | false |
mnot/redbot | redbot/message/headers/date.py | 1 | 1158 | from redbot.message import headers
from redbot.syntax import rfc7231
from redbot.type import AddNoteMethodType
class date(headers.HttpHeader):
    canonical_name = "Date"
    description = """\
The `Date` header represents the time when the message was generated, regardless of caching that
happened since.

It is used by caches as input to expiration calculations, and to detect clock drift."""
    reference = f"{rfc7231.SPEC_URL}#header.date"
    # Syntax checking is delegated to parse_date below, not a regex.
    syntax = False  # rfc7231.Date
    list_header = False
    deprecated = False
    valid_in_requests = True
    valid_in_responses = True

    def parse(self, field_value: str, add_note: AddNoteMethodType) -> int:
        # Returns the parsed date as a UNIX timestamp (seconds since epoch);
        # parse_date adds a note itself if the value is malformed.
        return headers.parse_date(field_value, add_note)
class BasicDateTest(headers.HeaderTest):
    # A well-formed IMF-fixdate should parse to its epoch timestamp.
    name = "Date"
    inputs = [b"Mon, 04 Jul 2011 09:08:06 GMT"]
    expected_out = 1309770486
class BadDateTest(headers.HeaderTest):
    # A non-date value should yield no parsed value and a syntax note.
    name = "Date"
    inputs = [b"0"]
    expected_out = None
    expected_err = [headers.BAD_DATE_SYNTAX]
class BlankDateTest(headers.HeaderTest):
    # An empty value is also a syntax error.
    name = "Date"
    inputs = [b""]
    expected_out = None
    expected_err = [headers.BAD_DATE_SYNTAX]
| mit | 432be12625cff759a53c953fc0fb3669 | 27.243902 | 96 | 0.698618 | 3.498489 | false | true | false | false |
bambinos/bambi | bambi/tests/test_utils.py | 1 | 1143 | import pytest
import numpy as np
import pandas as pd
from bambi.utils import censored, listify
from bambi.backend.pymc import probit, cloglog
from bambi.utils import censored
def test_listify():
    """listify() maps None to [], passes lists through, and wraps scalars."""
    assert listify(None) == []
    assert listify([1, 2, 3]) == [1, 2, 3]
    assert listify("giraffe") == ["giraffe"]
def test_probit():
    """probit() maps arbitrarily large reals into the open interval (0, 1)."""
    x = probit(np.random.normal(scale=10000, size=100)).eval()
    assert (x > 0).all() and (x < 1).all()
def test_cloglog():
    """cloglog() maps arbitrarily large reals into the open interval (0, 1)."""
    x = cloglog(np.random.normal(scale=10000, size=100)).eval()
    assert (x > 0).all() and (x < 1).all()
@pytest.mark.skip(reason="Censored still not ported")
def test_censored():
    """censored() stacks values with a status column; bad statuses raise."""
    df = pd.DataFrame(
        {
            "x": [1, 2, 3, 4, 5],
            "y": [2, 3, 4, 5, 6],
            "status": ["none", "right", "interval", "left", "none"],
        }
    )
    df_bad = pd.DataFrame({"x": [1, 2], "status": ["foo", "bar"]})
    # Two-argument form: (values, status) -> (n, 2)
    x = censored(df["x"], df["status"])
    assert x.shape == (5, 2)
    # Three-argument (interval) form: (lower, upper, status) -> (n, 3)
    x = censored(df["x"], df["y"], df["status"])
    assert x.shape == (5, 3)
    # Unknown status labels must be rejected.
    with pytest.raises(AssertionError):
        censored(df_bad["x"], df_bad["status"])
| mit | bae126c82d888431e246cc5539f3af36 | 23.847826 | 68 | 0.56168 | 2.864662 | false | true | false | false |
bambinos/bambi | setup.py | 1 | 1990 | import os
import sys
from setuptools import find_packages, setup
# Packaging metadata paths, resolved relative to this setup.py.
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
README_FILE = os.path.join(PROJECT_ROOT, "README.md")
VERSION_FILE = os.path.join(PROJECT_ROOT, "bambi", "version.py")
REQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, "requirements.txt")
OPTIONAL_REQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, "requirements-optional.txt")
# Minimum interpreter version required to install/run bambi.
MINIMUM_PYTHON_VERSION = (3, 7, 2)
def check_installation():
    """Exit with an error message if the running Python is too old."""
    if sys.version_info >= MINIMUM_PYTHON_VERSION:
        return
    required = ".".join(str(part) for part in MINIMUM_PYTHON_VERSION)
    sys.stderr.write(
        f"[{sys.argv[0]}] - Error: Your Python interpreter must be {required} or greater.\n"
    )
    sys.exit(-1)
def get_long_description():
    """Read README.md for use as the PyPI long description."""
    with open(README_FILE, encoding="utf-8") as readme:
        return readme.read()
def get_requirements():
    """Return the hard requirements, one requirement spec per element."""
    with open(REQUIREMENTS_FILE, encoding="utf-8") as reqs:
        return reqs.read().splitlines()
def get_optional_requirements():
    """Return the optional requirements, one requirement spec per element."""
    with open(OPTIONAL_REQUIREMENTS_FILE, encoding="utf-8") as reqs:
        return reqs.read().splitlines()
def get_version():
    """Return ``__version__`` as defined in bambi/version.py.

    The version file is exec()ed rather than imported so that setup.py works
    before the package (and its dependencies) can be imported.
    """
    with open(VERSION_FILE, encoding="utf-8") as buff:
        exec(buff.read())  # pylint: disable=exec-used
    # exec() above populates this function's locals; vars() retrieves them.
    return vars()["__version__"]
# Fail fast on unsupported interpreters before invoking setuptools.
check_installation()
__version__ = get_version()

setup(
    name="bambi",
    version=__version__,
    description="BAyesian Model Building Interface in Python",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    url="http://github.com/bambinos/bambi",
    download_url="https://github.com/bambinos/bambi/archive/%s.tar.gz" % __version__,
    install_requires=get_requirements(),
    extras_require={
        # pip install bambi[jax] pulls in the optional JAX-based samplers.
        "jax": [get_optional_requirements()],
    },
    maintainer="Tomas Capretto",
    maintainer_email="tomicapretto@gmail.com",
    packages=find_packages(exclude=["tests", "test_*"]),
    package_data={"bambi": ["priors/config/*"]},
    license="MIT",
)
| mit | e6c7cea0737eda50794bc89ff9ecdb1e | 29.151515 | 95 | 0.666332 | 3.425129 | false | false | false | false |
bambinos/bambi | bambi/backend/pymc.py | 1 | 22246 | import functools
import logging
import traceback
from copy import deepcopy
import numpy as np
import pymc as pm
import aesara.tensor as at
from bambi import version
from bambi.backend.links import cloglog, identity, inverse_squared, logit, probit, arctan_2
from bambi.backend.terms import CommonTerm, GroupSpecificTerm, InterceptTerm, ResponseTerm
from bambi.families.multivariate import MultivariateFamily
_log = logging.getLogger("bambi")
class PyMCModel:
    """PyMC model-fitting backend.

    Translates a Bambi abstract model specification into a PyMC model
    (``build``) and fits it via MCMC, variational inference, or a Laplace
    approximation (``run``).
    """

    # Inverse link functions, keyed by link name; applied to the linear
    # predictor to obtain the mean parameter of the likelihood.
    INVLINKS = {
        "cloglog": cloglog,
        "identity": identity,
        "inverse_squared": inverse_squared,
        "inverse": at.reciprocal,
        "log": at.exp,
        "logit": logit,
        "probit": probit,
        "tan_2": arctan_2,
        "softmax": functools.partial(at.nnet.softmax, axis=-1),
    }

    def __init__(self):
        self.name = pm.__name__
        self.version = pm.__version__

        # Attributes defined elsewhere
        self._design_matrix_without_intercept = None
        self.vi_approx = None
        self.coords = {}
        self.fit = False
        self.has_intercept = False
        self.model = None
        self.mu = None
        self.spec = None

    def build(self, spec):
        """Compile the PyMC model from an abstract model specification.

        Parameters
        ----------
        spec: bambi.Model
            A Bambi ``Model`` instance containing the abstract specification of the model
            to compile.
        """
        self.model = pm.Model()
        self.has_intercept = spec.intercept_term is not None
        # Linear predictor, accumulated term by term below.
        self.mu = 0.0

        for name, values in spec.response.coords.items():
            if name not in self.model.coords:
                self.model.add_coords({name: values})
        self.coords.update(**spec.response.coords)

        with self.model:
            self._build_intercept(spec)
            self._build_offsets(spec)
            self._build_common_terms(spec)
            self._build_group_specific_terms(spec)
            self._build_response(spec)
            self._build_potentials(spec)
        self.spec = spec

    def run(
        self,
        draws=1000,
        tune=1000,
        discard_tuned_samples=True,
        omit_offsets=True,
        include_mean=False,
        inference_method="mcmc",
        init="auto",
        n_init=50000,
        chains=None,
        cores=None,
        random_seed=None,
        **kwargs,
    ):
        """Run PyMC sampler.

        ``inference_method`` selects the fitting strategy: one of "mcmc",
        "nuts_numpyro", "nuts_blackjax", "vi", or "laplace".
        """
        inference_method = inference_method.lower()
        # NOTE: Methods return different types of objects (idata, approximation, and dictionary)
        if inference_method in ["mcmc", "nuts_numpyro", "nuts_blackjax"]:
            result = self._run_mcmc(
                draws,
                tune,
                discard_tuned_samples,
                omit_offsets,
                include_mean,
                init,
                n_init,
                chains,
                cores,
                random_seed,
                inference_method,
                **kwargs,
            )
        elif inference_method == "vi":
            result = self._run_vi(**kwargs)
        elif inference_method == "laplace":
            result = self._run_laplace(draws, omit_offsets, include_mean)
        else:
            raise NotImplementedError(f"{inference_method} method has not been implemented")
        self.fit = True
        return result

    def _build_intercept(self, spec):
        """Add intercept term to the PyMC model.

        We have linear predictors of the form 'X @ b + Z @ u'. This is technically part of
        'X @ b' but it is added separately for convenience reasons.

        Parameters
        ----------
        spec : bambi.Model
            The model.
        """
        if self.has_intercept:
            self.mu += InterceptTerm(spec.intercept_term).build(spec)

    def _build_common_terms(self, spec):
        """Add common (fixed) terms to the PyMC model.

        We have linear predictors of the form 'X @ b + Z @ u'.
        This creates the 'b' parameter vector in PyMC, computes `X @ b`, and adds it to ``self.mu``.

        Parameters
        ----------
        spec : bambi.Model
            The model.
        """
        if spec.common_terms:
            coefs = []
            columns = []
            for term in spec.common_terms.values():
                common_term = CommonTerm(term)
                # Add coords
                for name, values in common_term.coords.items():
                    if name not in self.model.coords:
                        self.model.add_coords({name: values})
                self.coords.update(**common_term.coords)

                # Build
                coef, data = common_term.build(spec)
                coefs.append(coef)
                columns.append(data)

            # Column vector of coefficients and design matrix
            coefs = at.concatenate(coefs)

            # Design matrix
            data = np.column_stack(columns)

            # If there's an intercept, center the data
            # Also store the design matrix without the intercept to uncenter the intercept later
            if self.has_intercept:
                self._design_matrix_without_intercept = data
                data = data - data.mean(0)

            # Add term to linear predictor
            self.mu += at.dot(data, coefs)

    def _build_group_specific_terms(self, spec):
        """Add group-specific (random or varying) terms to the PyMC model.

        We have linear predictors of the form 'X @ b + Z @ u'.
        This creates the 'u' parameter vector in PyMC, computes `Z @ u`, and adds it to ``self.mu``.

        Parameters
        ----------
        spec : bambi.Model
            The model.
        """
        # Add group specific terms that have prior for their correlation matrix
        for group, eta in spec.priors_cor.items():
            # pylint: disable=protected-access
            terms = [spec.terms[name] for name in spec._get_group_specific_groups()[group]]
            self.mu += add_lkj(self, terms, eta)

        # Remaining group-specific terms (no correlated prior requested).
        terms = [
            term
            for term in spec.group_specific_terms.values()
            if term.name.split("|")[1] not in spec.priors_cor
        ]
        for term in terms:
            group_specific_term = GroupSpecificTerm(term, spec.noncentered)

            # Add coords
            for name, values in group_specific_term.coords.items():
                if name not in self.model.coords:
                    self.model.add_coords({name: values})
            self.coords.update(**group_specific_term.coords)

            # Build
            coef, predictor = group_specific_term.build(spec)

            # Add to the linear predictor
            # The loop through predictor columns is not the most beautiful alternative.
            # But it's the fastest. Doing matrix multiplication, pm.math.dot(data, coef), is slower.
            if predictor.ndim > 1:
                for col in range(predictor.shape[1]):
                    self.mu += coef[:, col] * predictor[:, col]
            elif isinstance(spec.family, MultivariateFamily):
                self.mu += coef * predictor[:, np.newaxis]
            else:
                self.mu += coef * predictor

    def _build_offsets(self, spec):
        """Add offset terms to the PyMC model.

        Offsets are terms with a regression coefficient of 1.
        This is technically part of 'X @ b' in the linear predictor 'X @ b + Z @ u'.
        It's added here so we avoid the creation of a constant variable in PyMC.

        Parameters
        ----------
        spec : bambi.Model
            The model.
        """
        for offset in spec.offset_terms.values():
            self.mu += offset.data.squeeze()

    def _build_response(self, spec):
        """Add response term to the PyMC model

        Parameters
        ----------
        spec : bambi.Model
            The model.
        """
        ResponseTerm(spec.response, spec.family).build(self.mu, self.INVLINKS)

    def _build_potentials(self, spec):
        """Add potentials to the PyMC model.

        Potentials are arbitrary quantities that are added to the model log likelihood.
        See 'Factor Potentials' in
        https://github.com/fonnesbeck/probabilistic_python/blob/main/pymc_intro.ipynb

        Parameters
        ----------
        spec : bambi.Model
            The model.
        """
        if spec.potentials is not None:
            count = 0
            for variable, constraint in spec.potentials:
                if isinstance(variable, (list, tuple)):
                    # Multi-variable constraint: look up each variable by name.
                    lambda_args = [self.model[var] for var in variable]
                    potential = constraint(*lambda_args)
                else:
                    potential = constraint(self.model[variable])
                pm.Potential(f"pot_{count}", potential)
                count += 1

    def _run_mcmc(
        self,
        draws=1000,
        tune=1000,
        discard_tuned_samples=True,
        omit_offsets=True,
        include_mean=False,
        init="auto",
        n_init=50000,
        chains=None,
        cores=None,
        random_seed=None,
        sampler_backend="mcmc",
        **kwargs,
    ):
        """Sample the model with one of the NUTS backends and clean the result."""
        with self.model:
            if sampler_backend == "mcmc":
                try:
                    idata = pm.sample(
                        draws=draws,
                        tune=tune,
                        discard_tuned_samples=discard_tuned_samples,
                        init=init,
                        n_init=n_init,
                        chains=chains,
                        cores=cores,
                        random_seed=random_seed,
                        **kwargs,
                    )
                except (RuntimeError, ValueError):
                    # A singular mass matrix with init="auto" can sometimes be
                    # recovered by re-running with the simpler adapt_diag init.
                    if (
                        "ValueError: Mass matrix contains" in traceback.format_exc()
                        and init == "auto"
                    ):
                        _log.info(
                            "\nThe default initialization using init='auto' has failed, trying to "
                            "recover by switching to init='adapt_diag'",
                        )
                        idata = pm.sample(
                            draws=draws,
                            tune=tune,
                            discard_tuned_samples=discard_tuned_samples,
                            init="adapt_diag",
                            n_init=n_init,
                            chains=chains,
                            cores=cores,
                            random_seed=random_seed,
                            **kwargs,
                        )
                    else:
                        raise
            elif sampler_backend == "nuts_numpyro":
                # Lazy import to not force users to install Jax
                import pymc.sampling_jax  # pylint: disable=import-outside-toplevel

                if not chains:
                    chains = (
                        4  # sample_numpyro_nuts does not handle chains = None like pm.sample does
                    )
                idata = pymc.sampling_jax.sample_numpyro_nuts(
                    draws=draws,
                    tune=tune,
                    chains=chains,
                    random_seed=random_seed,
                    **kwargs,
                )
            elif sampler_backend == "nuts_blackjax":
                # Lazy import to not force users to install Jax
                import pymc.sampling_jax  # pylint: disable=import-outside-toplevel

                if not chains:
                    chains = (
                        4  # sample_blackjax_nuts does not handle chains = None like pm.sample does
                    )
                idata = pymc.sampling_jax.sample_blackjax_nuts(
                    draws=draws,
                    tune=tune,
                    chains=chains,
                    random_seed=random_seed,
                    **kwargs,
                )
            else:
                raise ValueError(
                    f"sampler_backend value {sampler_backend} is not valid. Please choose one of"
                    f"``mcmc``, ``nuts_numpyro`` or ``nuts_blackjax``"
                )

        idata = self._clean_results(idata, omit_offsets, include_mean)
        return idata

    def _clean_results(self, idata, omit_offsets, include_mean):
        """Tag, prune, and re-shape the InferenceData returned by the samplers."""
        for group in idata.groups():
            getattr(idata, group).attrs["modeling_interface"] = "bambi"
            getattr(idata, group).attrs["modeling_interface_version"] = version.__version__

        if omit_offsets:
            # NOTE(review): ``idata.posterior.var()`` computes the variance of
            # every variable just to iterate variable names; iterating
            # ``idata.posterior.data_vars`` directly would avoid that work.
            # Behavior looks equivalent — confirm before changing.
            offset_vars = [var for var in idata.posterior.var() if var.endswith("_offset")]
            idata.posterior = idata.posterior.drop_vars(offset_vars)

        # Drop variables and dimensions associated with LKJ prior
        vars_to_drop = [var for var in idata.posterior.var() if var.startswith("_LKJ")]
        dims_to_drop = [dim for dim in idata.posterior.dims if dim.startswith("_LKJ")]

        idata.posterior = idata.posterior.drop_vars(vars_to_drop)
        idata.posterior = idata.posterior.drop_dims(dims_to_drop)

        # Drop and reorder coords
        # About coordinates ending with "_dim_0"
        # Coordinates that end with "_dim_0" are added automatically.
        # These represents unidimensional coordinates that are added for numerical variables.
        # These variables have a shape of 1 so we can concatenate the coefficients and multiply
        # the resulting vector with the design matrix.
        # But having a unidimensional coordinate for a numeric variable does not make sense.
        # So we drop them.
        coords_to_drop = [dim for dim in idata.posterior.dims if dim.endswith("_dim_0")]
        idata.posterior = idata.posterior.squeeze(coords_to_drop).reset_coords(
            coords_to_drop, drop=True
        )

        # This does not add any new coordinate, it just changes the order so the ones
        # ending in "__factor_dim" are placed after the others.
        dims_original = list(self.coords)
        dims_group = [c for c in dims_original if c.endswith("__factor_dim")]

        # Keep the original order in dims_original
        dims_original_set = set(dims_original) - set(dims_group)
        dims_original = [c for c in dims_original if c in dims_original_set]
        dims_new = ["chain", "draw"] + dims_original + dims_group
        idata.posterior = idata.posterior.transpose(*dims_new)

        # Compute the actual intercept
        # (the data were centered in _build_common_terms, so the sampled
        # intercept must be shifted back to the original data scale)
        if self.has_intercept and self.spec.common_terms:
            chain_n = len(idata.posterior["chain"])
            draw_n = len(idata.posterior["draw"])
            shape = (chain_n, draw_n)
            dims = ["chain", "draw"]

            # Design matrix without intercept
            X = self._design_matrix_without_intercept

            # Re-scale intercept for centered predictors
            common_terms = []
            for term in self.spec.common_terms.values():
                if term.alias:
                    common_terms += [term.alias]
                else:
                    common_terms += [term.name]

            if self.spec.response.coords:
                # Grab the first object in a dictionary
                levels = list(self.spec.response.coords.values())[0]
                shape += (len(levels),)
                dims += list(self.spec.response.coords)

            posterior = idata.posterior.stack(samples=dims)
            coefs = np.vstack([np.atleast_2d(posterior[name].values) for name in common_terms])

            if self.spec.intercept_term.alias:
                intercept_name = self.spec.intercept_term.alias
            else:
                intercept_name = self.spec.intercept_term.name

            idata.posterior[intercept_name] = idata.posterior[intercept_name] - np.dot(
                X.mean(0), coefs
            ).reshape(shape)

        if include_mean:
            self.spec.predict(idata)

        return idata

    def _run_vi(self, **kwargs):
        # Variational inference; returns the fitted approximation object,
        # not an InferenceData (callers sample from it themselves).
        with self.model:
            self.vi_approx = pm.fit(**kwargs)
        return self.vi_approx

    def _run_laplace(self, draws, omit_offsets, include_mean):
        """Fit a model using a Laplace approximation.

        Mainly for pedagogical use, provides reasonable results for approximately
        Gaussian posteriors. The approximation can be very poor for some models
        like hierarchical ones. Use ``mcmc``, ``nuts_numpyro``, ``nuts_blackjax``
        or ``vi`` for better approximations.

        Parameters
        ----------
        draws: int
            The number of samples to draw from the posterior distribution.
        omit_offsets: bool
            Omits offset terms in the ``InferenceData`` object returned when the model includes
            group specific effects.
        include_mean: bool
            Compute the posterior of the mean response.

        Returns
        -------
        An ArviZ's InferenceData object.
        """
        with self.model:
            maps = pm.find_MAP()
            # Keep only transformed variables' MAPs (drop duplicates given in
            # the untransformed space).
            n_maps = deepcopy(maps)
            for m in maps:
                if pm.util.is_transformed_name(m):
                    n_maps.pop(pm.util.get_untransformed_name(m))

            hessian = pm.find_hessian(n_maps)

            if np.linalg.det(hessian) == 0:
                raise np.linalg.LinAlgError("Singular matrix. Use mcmc or vi method")

            # Gaussian approximation: mean at the mode, covariance = inverse Hessian.
            cov = np.linalg.inv(hessian)
            modes = np.concatenate([np.atleast_1d(v) for v in n_maps.values()])

            samples = np.random.multivariate_normal(modes, cov, size=draws)

        idata = _posterior_samples_to_idata(samples, self.model)
        idata = self._clean_results(idata, omit_offsets, include_mean)
        return idata
def _posterior_samples_to_idata(samples, model):
    """Create InferenceData from samples.

    Parameters
    ----------
    samples: array
        Posterior samples
    model: PyMC model

    Returns
    -------
    An ArviZ's InferenceData object.
    """
    initial_point = model.initial_point(seed=None)
    variables = model.value_vars

    # Record each variable's shape/size so flat sample rows can be unpacked.
    var_info = {}
    for name, value in initial_point.items():
        var_info[name] = (value.shape, value.size)

    length_pos = len(samples)
    varnames = [v.name for v in variables]

    with model:
        strace = pm.backends.ndarray.NDArray(name=model.name)  # pylint:disable=no-member
        strace.setup(length_pos, 0)
    for i in range(length_pos):
        value = []
        size = 0
        # Slice the flat sample row into one reshaped array per variable.
        for varname in varnames:
            shape, new_size = var_info[varname]
            var_samples = samples[i][size : size + new_size]
            value.append(var_samples.reshape(shape))
            size += new_size
        strace.record(point=dict(zip(varnames, value)))

    idata = pm.to_inference_data(pm.backends.base.MultiTrace([strace]), model=model)
    return idata
def add_lkj(backend, terms, eta=1):
    """Add correlated prior for group-specific effects.

    This function receives a list of group-specific terms that share their `grouper`, constructs
    a multivariate Normal prior with LKJ prior on the correlation matrix, and adds the necessary
    variables to the model. It uses a non-centered parametrization.

    Parameters
    ----------
    backend: PyMCModel
        The backend instance whose PyMC model receives the new variables.
    terms: list
        A list of terms that share a common grouper (i.e. ``1|Group`` and ``Variable|Group`` in
        formula notation).
    eta: num
        The value for the eta parameter in the LKJ distribution.

    Returns
    -------
    mu
        The contribution to the linear predictor of the group-specific terms in ``terms``.
    """
    # Parameters
    # grouper: The name of the grouper.build_group_specific_distribution
    # rows: Sum of the number of columns in all the "Xi" matrices for a given grouper.
    #       Same than the order of L
    # cols: Number of groups in the grouper variable
    mu = 0
    grouper = terms[0].name.split("|")[1]
    rows = int(np.sum([term.predictor.shape[1] for term in terms]))
    cols = int(terms[0].grouper.shape[1])  # not the most beautiful, but works

    # Construct sigma
    # Horizontally stack the sigma values for all the hyperpriors
    sigma = np.hstack([term.prior.args["sigma"].args["sigma"] for term in terms])

    # Reconstruct the hyperprior for the standard deviations, using one variable
    sigma = pm.HalfNormal.dist(sigma=sigma, shape=rows)

    # Obtain Cholesky factor for the covariance
    # pylint: disable=unused-variable, disable=unpacking-non-sequence
    (lkj_decomp, corr, sigma,) = pm.LKJCholeskyCov(
        "_LKJCholeskyCov_" + grouper,
        n=rows,
        eta=eta,
        sd_dist=sigma,
        compute_corr=True,
        store_in_trace=False,
    )

    # Non-centered parametrization: standard-normal offsets scaled by the
    # Cholesky factor give the correlated coefficients.
    coefs_offset = pm.Normal("_LKJ_" + grouper + "_offset", mu=0, sigma=1, shape=(rows, cols))
    coefs = at.dot(lkj_decomp, coefs_offset).T

    ## Separate group-specific terms
    start = 0
    for term in terms:
        label = term.name
        dims = list(term.coords)

        # Add coordinates to the model, only if they are not added yet.
        for name, values in term.coords.items():
            if name not in backend.model.coords:
                backend.model.add_coords({name: values})
        backend.coords.update(**term.coords)

        predictor = term.predictor.squeeze()
        delta = term.predictor.shape[1]

        if delta == 1:
            idx = start
        else:
            idx = slice(start, start + delta)

        # Add prior for the parameter
        coef = pm.Deterministic(label, coefs[:, idx], dims=dims)
        coef = coef[term.group_index]

        # Add standard deviation of the hyperprior distribution
        group_dim = [dim for dim in dims if dim.endswith("_group_expr")]
        pm.Deterministic(label + "_sigma", sigma[idx], dims=group_dim)

        # Account for the contribution of the term to the linear predictor
        if predictor.ndim > 1:
            for col in range(predictor.shape[1]):
                mu += coef[:, col] * predictor[:, col]
        else:
            mu += coef * predictor
        start += delta

    # TO DO: Add correlations
    return mu
| mit | fd9b994edc02435f895b5cd93c07fd3e | 34.996764 | 100 | 0.556594 | 4.2438 | false | false | false | false |
bambinos/bambi | bambi/families/family.py | 1 | 2700 | from bambi.families.link import Link
class Family:
    """A specification of model family.

    Parameters
    ----------
    name: str
        The name of the family. It can be any string.
    likelihood: Likelihood
        A ``bambi.families.Likelihood`` instance specifying the model likelihood function.
    link: str or Link
        The name of the link function or a ``bambi.families.Link`` instance. The link function
        transforms the linear model prediction to the mean parameter of the likelihood function.

    Examples
    --------
    >>> import bambi as bmb

    Replicate the Gaussian built-in family.

    >>> sigma_prior = bmb.Prior("HalfNormal", sigma=1)
    >>> likelihood = bmb.Likelihood("Gaussian", parent="mu", sigma=sigma_prior)
    >>> family = bmb.Family("gaussian", likelihood, "identity")
    >>> # Then you can do
    >>> # bmb.Model("y ~ x", data, family=family)

    Replicate the Bernoulli built-in family.

    >>> likelihood = bmb.Likelihood("Bernoulli", parent="p")
    >>> family = bmb.Family("bernoulli", likelihood, "logit")
    """

    # Link names accepted by the ``link`` setter when given as a string.
    SUPPORTED_LINKS = [
        "cloglog",
        "identity",
        "inverse_squared",
        "inverse",
        "log",
        "logit",
        "probit",
        "softmax",
        "tan_2",
    ]

    def __init__(self, name, likelihood, link):
        self.name = name
        self.likelihood = likelihood
        self.link = link  # validated/normalized by the property setter below
        self.aliases = {}

    @property
    def link(self):
        return self._link

    @link.setter
    def link(self, x):
        # Accept either a link name (validated) or a ready-made Link instance.
        if isinstance(x, str):
            self.check_string_link(x)
            self._link = Link(x)
        elif isinstance(x, Link):
            self._link = x
        else:
            raise ValueError(".link must be set to a string or a Link instance.")

    def check_string_link(self, link):
        """Raise ``ValueError`` if ``link`` is not a supported link name."""
        # Idiom fix: "link not in ..." instead of "not link in ...".
        if link not in self.SUPPORTED_LINKS:
            raise ValueError(f"Link '{link}' cannot be used with family '{self.name}'")

    def set_alias(self, name, alias):
        """Set alias for an auxiliary variable of the family

        Parameters
        ----------
        name: str
            The name of the variable
        alias: str
            The new name for the variable
        """
        self.aliases.update({name: alias})

    def __str__(self):
        msg_list = [f"Response distribution: {self.likelihood.name}", f"Link: {self.link.name}"]
        if self.likelihood.priors:
            priors_msg = "\n ".join([f"{k} ~ {v}" for k, v in self.likelihood.priors.items()])
            msg_list += [f"Priors:\n {priors_msg}"]
        msg = "\n".join(msg_list)
        return msg

    def __repr__(self):
        return self.__str__()
gdsfactory/gdsfactory | gdsfactory/components/optimal_90deg.py | 1 | 1902 | import numpy as np
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.types import LayerSpec
@cell
def optimal_90deg(
    width: float = 100,
    num_pts: int = 15,
    length_adjust: float = 1,
    layer: LayerSpec = (1, 0),
) -> Component:
    """Returns optimally-rounded 90 degree bend that is sharp on the outer corner.

    Args:
        width: Width of the ports on either side of the bend.
        num_pts: The number of points comprising the curved section of the bend.
        length_adjust: Adjusts the length of the non-curved portion of the bend.
        layer: Specific layer(s) to put polygon geometry on.

    Notes:
        Optimal structure from https://doi.org/10.1103/PhysRevB.84.174510
        Clem, J., & Berggren, K. (2011). Geometry-dependent critical currents in
        superconducting nanocircuits. Physical Review B, 84(17), 1–27.
    """
    D = Component()

    # Get points of ideal curve
    a = 2 * width
    # Log-spaced curve parameter; length_adjust widens/narrows the sampled range.
    v = np.logspace(-length_adjust, length_adjust, num_pts)
    # Complex-valued parametrization of the optimal inner corner curve;
    # real part -> x coordinates, imaginary part -> y coordinates.
    xi = (
        a
        / 2.0
        * ((1 + 2 / np.pi * np.arcsinh(1 / v)) + 1j * (1 + 2 / np.pi * np.arcsinh(v)))
    )
    xpts = list(np.real(xi))
    ypts = list(np.imag(xi))

    # Add points for the rest of curve
    d = 2 * xpts[0]  # twice the farthest x extent of the curve (outer boundary size)
    xpts.append(width)
    ypts.append(d)
    xpts.append(0)
    ypts.append(d)
    xpts.append(0)
    ypts.append(0)
    xpts.append(d)
    ypts.append(0)
    xpts.append(d)
    ypts.append(width)
    # Close the polygon back at the first curve point.
    xpts.append(xpts[0])
    ypts.append(ypts[0])

    D.add_polygon([xpts, ypts], layer=layer)

    # Ports sit on the two outer edges of the bend, each a/2 = width wide.
    D.add_port(name="e1", center=[a / 4, d], width=a / 2, orientation=90, layer=layer)
    D.add_port(name="e2", center=[d, a / 4], width=a / 2, orientation=0, layer=layer)
    return D
if __name__ == "__main__":
    # Quick visual smoke test of the default bend.
    c = optimal_90deg()
    c.show(show_ports=True)
| mit | e49b54603fd6db35aebf4a04c291fa86 | 28.230769 | 86 | 0.618421 | 3.030303 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/read/from_dphox.py | 1 | 1638 | from gdsfactory.component import Component, ComponentReference
def from_dphox(device: "dp.Device", foundry: "dp.foundry.Foundry") -> Component:
    """Returns Gdsfactory Component from a dphox Device.

    Note that you need to install dphox `pip install dphox`
    https://dphox.readthedocs.io/en/latest/index.html

    Args:
        device: Dphox device.
        foundry: Dphox foundry object.
    """
    c = Component(device.name)
    # Copy raw polygons, mapping dphox layer names to GDS (layer, datatype).
    for layer_name, shapely_multipolygon in device.layer_to_polys.items():
        for poly in shapely_multipolygon:
            layer = foundry.layer_to_gds_label[layer_name]
            c.add_polygon(points=poly, layer=layer)
    # Recursively convert each child device, then add one reference per
    # placement transform recorded for that child.
    for ref in device.child_to_device:
        child = from_dphox(device.child_to_device[ref], foundry)
        for gds_transform in device.child_to_transform[ref][-1]:
            new_ref = ComponentReference(
                component=child,
                origin=(gds_transform.x, gds_transform.y),
                rotation=gds_transform.angle,
                magnification=gds_transform.mag,
                x_reflection=gds_transform.flip_y,
            )
            new_ref.owner = c
            c.add(new_ref)
    for port_name, port in device.port.items():
        c.add_port(
            name=port_name,
            center=(port.x, port.y),
            orientation=port.a,
            width=port.w,
            # Fall back to layer (1, 0) for port layers unknown to the foundry.
            layer=foundry.layer_to_gds_label.get(port.layer, (1, 0)),
        )
    return c
if __name__ == "__main__":
    import dphox as dp
    from dphox.demo import mzi

    # Demo: convert dphox's example MZI and open it in the viewer.
    c = from_dphox(mzi, foundry=dp.foundry.FABLESS)
    c.show(show_ports=True)
| mit | eb70f6e2d0e09a5f823507c9d839fbaa | 30.5 | 80 | 0.601954 | 3.485106 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/component_layout.py | 1 | 22858 | import numbers
from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from gdstk import Label, Polygon
from numpy import cos, pi, sin
from numpy.linalg import norm
def get_polygons(
    instance,
    by_spec: Union[bool, Tuple[int, int]] = False,
    depth: Optional[int] = None,
    include_paths: bool = True,
    as_array: bool = True,
) -> Union[List[Polygon], Dict[Tuple[int, int], List[Polygon]]]:
    """Return a list of polygons in this cell.

    Args:
        by_spec: bool or layer
            If True, the return value is a dictionary with the
            polygons of each individual pair (layer, datatype), which
            are used as keys. If set to a tuple of (layer, datatype),
            only polygons with that specification are returned.
        depth: integer or None
            If not None, defines from how many reference levels to
            retrieve polygons. References below this level will result
            in a bounding box. If `by_spec` is True the key will be the
            name of this cell.
        include_paths: If True, polygonal representation of paths are also
            included in the result.
        as_array: when as_array=false, return the Polygon objects instead.
            polygon objects have more information (especially when
            by_spec=False) and will be faster to retrieve.

    Returns:
        out: list of array-like[N][2] or dictionary
            List containing the coordinates of the vertices of each
            polygon, or dictionary with the list of polygons (if
            `by_spec` is True).

    Note:
        Instances of `FlexPath` and `RobustPath` are also included in
        the result by computing their polygonal boundary.
    """
    import gdsfactory as gf

    # `instance` may be a Component (has ._cell) or a ComponentReference
    # (has ._reference); resolve the underlying gdstk object either way.
    if hasattr(instance, "_cell"):
        layers = instance.get_layers()
        gdstk_instance = instance._cell
    else:
        layers = instance.parent.get_layers()
        gdstk_instance = instance._reference
    if not by_spec:
        polygons = gdstk_instance.get_polygons(depth=depth, include_paths=include_paths)
    elif by_spec is True:
        # One gdstk query per (layer, datatype) pair present in the cell.
        polygons = {
            layer: gdstk_instance.get_polygons(
                depth=depth,
                layer=layer[0],
                datatype=layer[1],
                include_paths=include_paths,
            )
            for layer in layers
        }
    else:
        # A single layer spec was requested; normalize it first.
        by_spec = gf.get_layer(by_spec)
        polygons = gdstk_instance.get_polygons(
            depth=depth,
            layer=by_spec[0],
            datatype=by_spec[1],
            include_paths=include_paths,
        )
    if not as_array:
        return polygons
    if by_spec is not True:
        # Flat list case: strip Polygon objects down to their point arrays.
        return [polygon.points for polygon in polygons]
    layer_to_polygons = defaultdict(list)
    for layer, polygons_list in polygons.items():
        for polygon in polygons_list:
            layer_to_polygons[layer].append(polygon.points)
    return layer_to_polygons
def _parse_layer(layer):
    """Resolve ``layer`` into a ``(gds_layer, gds_datatype)`` tuple.

    Accepts a Layer-like object (with ``gds_layer``/``gds_datatype``
    attributes), a 2-element sequence ``[layer, datatype]``, a 1-element
    sequence ``[layer]``, ``None`` (meaning layer 0, datatype 0), or a
    bare number.

    Args:
        layer: int, array-like[2], None, or Layer-like object to check.

    Returns:
        (gds_layer, gds_datatype): array-like[2] with the layer number
        and datatype of the input.

    Raises:
        ValueError: If ``layer`` cannot be interpreted as a layer.
    """
    if hasattr(layer, "gds_layer"):
        gds_layer, gds_datatype = layer.gds_layer, layer.gds_datatype
    elif np.shape(layer) == (2,):  # In form [3, 0]
        gds_layer, gds_datatype = layer[0], layer[1]
    elif np.shape(layer) == (1,):  # In form [3]
        gds_layer, gds_datatype = layer[0], 0
    elif layer is None:
        gds_layer, gds_datatype = 0, 0
    elif isinstance(layer, numbers.Number):
        gds_layer, gds_datatype = layer, 0
    else:
        # f-string replaces the old %-formatted message that embedded raw
        # indentation whitespace from the source file.
        raise ValueError(
            "_parse_layer() was passed something that could not be "
            f"interpreted as a layer: layer = {layer}"
        )
    return (gds_layer, gds_datatype)
class _GeometryHelper:
    """Helper class for a class with functions move() and the property bbox.

    It uses that function+property to enable you to do things like check what the
    center of the bounding box is (self.center), and also to do things like move
    the bounding box such that its maximum x value is 5.2 (self.xmax = 5.2).
    """

    @property
    def center(self):
        """Returns the center of the bounding box."""
        return np.sum(self.bbox, 0) / 2

    @center.setter
    def center(self, destination):
        """Sets the center of the bounding box.

        Args:
            destination : array-like[2] Coordinates of the new bounding box center.
        """
        self.move(destination=destination, origin=self.center)

    @property
    def x(self):
        """Returns the x-coordinate of the center of the bounding box."""
        return np.sum(self.bbox, 0)[0] / 2

    @x.setter
    def x(self, destination):
        """Sets the x-coordinate of the center of the bounding box.

        Args:
            destination : int or float x-coordinate of the bbox center.
        """
        destination = (destination, self.center[1])
        self.move(destination=destination, origin=self.center, axis="x")

    @property
    def y(self):
        """Returns the y-coordinate of the center of the bounding box."""
        return np.sum(self.bbox, 0)[1] / 2

    @y.setter
    def y(self, destination):
        """Sets the y-coordinate of the center of the bounding box.

        Args:
            destination : int or float
                y-coordinate of the bbox center.
        """
        destination = (self.center[0], destination)
        self.move(destination=destination, origin=self.center, axis="y")

    @property
    def xmax(self):
        """Returns the maximum x-value of the bounding box."""
        return self.bbox[1][0]

    @xmax.setter
    def xmax(self, destination):
        """Sets the x-coordinate of the maximum edge of the bounding box.

        Args:
            destination : int or float
                x-coordinate of the maximum edge of the bbox.
        """
        self.move(destination=(destination, 0), origin=self.bbox[1], axis="x")

    @property
    def ymax(self):
        """Returns the maximum y-value of the bounding box."""
        return self.bbox[1][1]

    @ymax.setter
    def ymax(self, destination):
        """Sets the y-coordinate of the maximum edge of the bounding box.

        Args:
            destination : int or float y-coordinate of the maximum edge of the bbox.
        """
        self.move(destination=(0, destination), origin=self.bbox[1], axis="y")

    @property
    def xmin(self):
        """Returns the minimum x-value of the bounding box."""
        return self.bbox[0][0]

    @xmin.setter
    def xmin(self, destination):
        """Sets the x-coordinate of the minimum edge of the bounding box.

        Args:
            destination : int or float x-coordinate of the minimum edge of the bbox.
        """
        self.move(destination=(destination, 0), origin=self.bbox[0], axis="x")

    @property
    def ymin(self):
        """Returns the minimum y-value of the bounding box."""
        return self.bbox[0][1]

    @ymin.setter
    def ymin(self, destination):
        """Sets the y-coordinate of the minimum edge of the bounding box.

        Args:
            destination : int or float y-coordinate of the minimum edge of the bbox.
        """
        self.move(destination=(0, destination), origin=self.bbox[0], axis="y")

    @property
    def size(self):
        """Returns the (x, y) size of the bounding box."""
        bbox = self.bbox
        return bbox[1] - bbox[0]

    @property
    def xsize(self):
        """Returns the horizontal size of the bounding box."""
        bbox = self.bbox
        return bbox[1][0] - bbox[0][0]

    @property
    def ysize(self):
        """Returns the vertical size of the bounding box."""
        bbox = self.bbox
        return bbox[1][1] - bbox[0][1]

    def movex(self, origin=0, destination=None):
        """Moves an object by a specified x-distance.

        Args:
            origin: array-like[2], Port, or key Origin point of the move.
            destination: array-like[2], Port, key, or None Destination point of the move.
        """
        # Single-argument form: movex(dx) moves by dx from 0.
        if destination is None:
            destination = origin
            origin = 0
        return self.move(origin=(origin, 0), destination=(destination, 0))

    def movey(self, origin=0, destination=None):
        """Moves an object by a specified y-distance.

        Args:
            origin : array-like[2], Port, or key Origin point of the move.
            destination : array-like[2], Port, or key Destination point of the move.
        """
        # Single-argument form: movey(dy) moves by dy from 0.
        if destination is None:
            destination = origin
            origin = 0
        return self.move(origin=(0, origin), destination=(0, destination))

    def __add__(self, element):
        """Adds an element to a Group.

        Args:
            element: Component, ComponentReference, Port, Polygon,
                Label, or Group to add.
        """
        # Group + element flattens into one Group; otherwise pair them up.
        if isinstance(self, Group):
            G = Group()
            G.add(self.elements)
            G.add(element)
        else:
            G = Group([self, element])
        return G
class Group(_GeometryHelper):
    """Group objects together so you can manipulate them as a single object \
    (move/rotate/mirror)."""

    def __init__(self, *args):
        """Initialize Group."""
        self.elements = []
        self.add(args)

    def __repr__(self) -> str:
        """Prints the number of elements in the Group."""
        return f"Group ({len(self.elements)} elements total)"

    def __len__(self) -> int:
        """Returns the number of elements in the Group."""
        return len(self.elements)

    def __iadd__(self, element) -> "Group":
        """Adds an element to the Group.

        Args:
            element: Component, ComponentReference, Port, Polygon,
                Label, or Group to add.
        """
        return self.add(element)

    @property
    def bbox(self):
        """Returns the bounding boxes of the Group."""
        if len(self.elements) == 0:
            raise ValueError("Group is empty, no bbox is available")
        # One flattened (xmin, ymin, xmax, ymax) row per element.
        bboxes = np.empty([len(self.elements), 4])
        for n, e in enumerate(self.elements):
            bboxes[n] = e.bbox.flatten()
        bbox = (
            (bboxes[:, 0].min(), bboxes[:, 1].min()),
            (bboxes[:, 2].max(), bboxes[:, 3].max()),
        )
        return np.array(bbox)

    def add(self, element) -> "Group":
        """Adds an element to the Group.

        Args:
            element: Component, ComponentReference, Port, Polygon,
                Label, or Group to add.
        """
        from gdsfactory.component import Component
        from gdsfactory.component_reference import ComponentReference

        if _is_iterable(element):
            [self.add(e) for e in element]
        elif element is None:
            return self
        elif isinstance(
            element, (Component, ComponentReference, Polygon, Label, Group)
        ):
            self.elements.append(element)
        else:
            raise ValueError(
                "add() Could not add element to Group, the only "
                "allowed element types are "
                "(Component, ComponentReference, Polygon, Label, Group)"
            )
        # Remove non-unique entries
        # Order-preserving dedup: `used.add(x) or True` records x while
        # keeping the first occurrence in the comprehension.
        used = set()
        self.elements = [
            x for x in self.elements if x not in used and (used.add(x) or True)
        ]
        return self

    def rotate(self, angle: float = 45, center=(0, 0)) -> "Group":
        """Rotates all elements in a Group around the specified centerpoint.

        Args:
            angle : int or float
                Angle to rotate the Group in degrees.
            center : array-like[2] or None
                center of the Group.
        """
        for e in self.elements:
            e.rotate(angle=angle, center=center)
        return self

    def move(self, origin=(0, 0), destination=None, axis=None) -> "Group":
        """Moves the Group from the origin point to the destination.

        Both origin and destination can be 1x2 array-like, Port, or a key
        corresponding to one of the Ports in this Group.

        Args:
            origin : array-like[2], Port, or key
                Origin point of the move.
            destination : array-like[2], Port, or key
                Destination point of the move.
            axis : {'x', 'y'}
                Direction of the move.
        """
        for e in self.elements:
            e.move(origin=origin, destination=destination, axis=axis)
        return self

    def mirror(self, p1=(0, 1), p2=(0, 0)) -> "Group":
        """Mirrors a Group across the line formed between the two specified points.

        ``points`` may be input as either single points
        [1,2] or array-like[N][2], and will return in kind.

        Args:
            p1 : array-like[N][2]
                First point of the line.
            p2 : array-like[N][2]
                Second point of the line.
        """
        for e in self.elements:
            e.mirror(p1=p1, p2=p2)
        return self

    def distribute(
        self, direction="x", spacing=100, separation=True, edge="center"
    ) -> "Group":
        """Distributes the elements in the Group.

        Args:
            direction : {'x', 'y'}
                Direction of distribution; either a line in the x-direction or
                y-direction.
            spacing : int or float
                Distance between elements.
            separation : bool
                If True, guarantees elements are separated with a fixed spacing
                between; if False, elements are spaced evenly along a grid.
            edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'}
                Which edge to perform the distribution along (unused if
                separation == True)
        """
        _distribute(
            elements=self.elements,
            direction=direction,
            spacing=spacing,
            separation=separation,
            edge=edge,
        )
        return self

    def align(self, alignment="ymax") -> "Group":
        """Aligns the elements in the Group.

        Args:
            alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'}
                Which edge to align along (e.g. 'ymax' will align move the elements
                such that all of their topmost points are aligned)
        """
        _align(elements=self.elements, alignment=alignment)
        return self
def _rotate_points(points, angle: float = 45, center=(0, 0)):
    """Rotate point(s) by ``angle`` degrees around ``center``.

    ``points`` may be a single point [x, y] or array-like[N][2]; the result
    matches the input's dimensionality. A zero angle returns ``points``
    unchanged (same object).
    """
    if angle == 0:
        return points
    theta = angle * pi / 180
    # Rotation written as an elementwise product with the coordinate-swapped
    # points: (x', y') = (x*cos - y*sin, y*cos + x*sin).
    signed_sin = np.array((-sin(theta), sin(theta)))
    offset = np.array(center)
    shifted = np.asarray(points) - offset
    if shifted.ndim == 2:
        return shifted * cos(theta) + shifted[:, ::-1] * signed_sin + offset
    if shifted.ndim == 1:
        return shifted * cos(theta) + shifted[::-1] * signed_sin + offset
def _reflect_points(points, p1=(0, 0), p2=(1, 0)):
    """Reflect point(s) across the infinite line through ``p1`` and ``p2``.

    ``points`` may be a single point [x, y] or array-like[N][2]; the result
    matches the input's dimensionality.
    """
    # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line
    # Mirror formula: p' = 2 * proj_line(p) - p, where proj_line(p) is the
    # orthogonal projection of p onto the line through p1 and p2.
    points = np.array(points)
    p1 = np.array(p1)
    p2 = np.array(p2)
    direction = p2 - p1
    denom = norm(p2 - p1) ** 2

    def mirror(pt):
        projected = p1 + direction * np.dot(direction, pt - p1) / denom
        return 2 * projected - pt

    if points.ndim == 1:
        return mirror(points)
    if points.ndim == 2:
        return np.array([mirror(pt) for pt in points])
def _is_iterable(items):
    """Return True when ``items`` is a list, tuple, set, or numpy array.

    Note: strings and generic iterables (e.g. generators) are deliberately
    not treated as iterable here.
    """
    iterable_types = (list, tuple, set, np.ndarray)
    return isinstance(items, iterable_types)
def _parse_coordinate(c):
    """Translate various inputs (lists, tuples, Ports) to an (x, y) coordinate.

    Args:
        c: array-like[2] or Port. Input to translate into a coordinate.

    Returns:
        c: array-like[2]. Parsed coordinate.

    Raises:
        ValueError: If ``c`` is neither Port-like nor a 2-element coordinate.
    """
    # Port-like objects expose their coordinate via .center.
    if hasattr(c, "center"):
        return c.center
    if np.array(c).size == 2:
        return c
    # Message fixed: the original example was missing its closing parenthesis.
    raise ValueError(
        "Could not parse coordinate, input should be array-like (e.g. [1.5, 2.3]) or a Port"
    )
def _parse_move(origin, destination, axis):
    """Translate input coordinates to changes in position in x and y.

    Args:
        origin : array-like[2] of int or float, Port, or key
            Origin point of the move.
        destination : array-like[2] of int or float, Port, key, or None
            Destination point of the move.
        axis : {'x', 'y'} Direction of move.

    Returns:
        dx, dy : int or float changes in position along x and y.
    """
    if destination is None:
        # Only one point was given: treat it as the destination, from (0, 0).
        origin, destination = [0, 0], origin
    dest = _parse_coordinate(destination)
    orig = _parse_coordinate(origin)
    if axis == "x":
        dest = (dest[0], orig[1])
    if axis == "y":
        dest = (orig[0], dest[1])
    dx, dy = np.array(dest) - orig
    return dx, dy
def _distribute(elements, direction="x", spacing=100, separation=True, edge=None):
    """Takes a list of elements and distributes them either equally along a \
    grid or with a fixed spacing between them.

    Args:
        elements : array-like of gdsfactory objects
            Elements to distribute.
        direction : {'x', 'y'}
            Direction of distribution; either a line in the x-direction or
            y-direction.
        spacing : int or float
            Distance between elements.
        separation : bool
            If True, guarantees elements are separated with a fixed spacing
            between; if False, elements are spaced evenly along a grid.
        edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'}
            Which edge to perform the distribution along (unused if
            separation == True)

    Returns:
        elements : Component, ComponentReference, Port, Polygon, Label, or Group
            Distributed elements.
    """
    if len(elements) == 0:
        return elements
    if direction not in ({"x", "y"}):
        raise ValueError("distribute(): 'direction' argument must be either 'x' or'y'")
    # edge is only meaningful in grid mode (separation == False); validate it
    # against the chosen direction.
    if (
        (direction == "x")
        and (edge not in ({"x", "xmin", "xmax"}))
        and (not separation)
    ):
        raise ValueError(
            "distribute(): When `separation` == False and direction == 'x',"
            " the `edge` argument must be one of {'x', 'xmin', 'xmax'}"
        )
    if (
        (direction == "y")
        and (edge not in ({"y", "ymin", "ymax"}))
        and (not separation)
    ):
        raise ValueError(
            "distribute(): When `separation` == False and direction == 'y',"
            " the `edge` argument must be one of {'y', 'ymin', 'ymax'}"
        )
    if direction == "y":
        sizes = [e.ysize for e in elements]
    if direction == "x":
        sizes = [e.xsize for e in elements]
    spacing = np.array([spacing] * len(elements))
    if separation:  # Then `edge` doesn't apply
        if direction == "x":
            edge = "xmin"
        if direction == "y":
            edge = "ymin"
    else:
        # Grid mode: ignore element sizes so positions fall on a regular pitch.
        sizes = np.zeros(len(spacing))
    # Calculate new positions and move each element
    # Cumulative sum of (spacing + size) gives each element's edge position,
    # anchored at the first element's current edge.
    start = elements[0].__getattribute__(edge)
    positions = np.cumsum(np.concatenate(([start], (spacing + sizes))))
    for n, e in enumerate(elements):
        e.__setattr__(edge, positions[n])
    return elements
def _align(elements, alignment="ymax"):
    """Aligns lists of gdsfactory elements.

    Args:
        elements : array-like of gdsfactory objects
            Elements to align.
        alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'}
            Which edge to align along (e.g. 'ymax' will align move the elements
            such that all of their topmost points are aligned)

    Returns:
        elements : array-like of gdsfactory objects
            Aligned elements.
    """
    if len(elements) == 0:
        return elements
    if alignment not in (["x", "y", "xmin", "xmax", "ymin", "ymax"]):
        raise ValueError(
            "'alignment' argument must be one of 'x','y','xmin', 'xmax', 'ymin','ymax'"
        )
    # Use the Group's combined bbox edge as the common alignment target,
    # then snap every element's corresponding edge to it.
    value = Group(elements).__getattribute__(alignment)
    for e in elements:
        e.__setattr__(alignment, value)
    return elements
def _line_distances(points, start, end):
    """Return the perpendicular distance of each point to the line start-end.

    Falls back to plain point-to-point distance when start == end
    (degenerate line).
    """
    if np.all(start == end):
        return np.linalg.norm(points - start, axis=1)
    segment = end - start
    # |cross| / |segment| is the perpendicular distance to the infinite line.
    cross = np.cross(segment, start - points)
    return np.abs(cross) / np.linalg.norm(segment)
def _simplify(points, tolerance=0):
    """Ramer–Douglas–Peucker algorithm for line simplification.

    Takes an array of points of shape (N, 2) and removes excess points in the
    line. The remaining points form an identical line to within `tolerance`
    of the original.
    """
    # From https://github.com/fhirschmann/rdp/issues/7
    # originally written by Kirill Konevets https://github.com/kkonevets
    pts = np.asarray(points)
    first, last = pts[0], pts[-1]
    distances = _line_distances(pts, first, last)
    split = np.argmax(distances)
    if distances[split] <= tolerance:
        # All intermediate points lie within tolerance of the chord.
        return np.array([first, last])
    # Recurse on both halves (sharing the split point), then stitch the
    # results together without duplicating it.
    left = _simplify(pts[: split + 1], tolerance)
    right = _simplify(pts[split:], tolerance)
    return np.vstack((left[:-1], right))
| mit | cc706a22b4309db042a71953201a1200 | 31.695279 | 176 | 0.575917 | 4.029267 | false | false | false | false |
gdsfactory/gdsfactory | gdsfactory/samples/23_reticle.py | 1 | 1796 | """Sample of a reticle top level Component."""
import gdsfactory as gf
from gdsfactory.types import Component
def mzi_te_pads1(**kwargs) -> Component:
    """MZI with heater phase shifter, single-fiber IO, and top electrical pads.

    NOTE(review): ``kwargs`` is accepted but never used — confirm intent.
    """
    c = gf.c.mzi_phase_shifter_top_heater_metal(delta_length=40)
    c = gf.routing.add_fiber_single(c)
    c = c.rotate(-90)
    c = gf.routing.add_electrical_pads_top(c)
    # Electrical port labels carry the component name for reticle traceability.
    gf.add_labels.add_labels_to_ports_electrical(component=c, prefix=f"elec-{c.name}-")
    return c
def mzi_te_pads2(**kwargs) -> Component:
    """Same as mzi_te_pads1 but routed with DC-style top electrical pads.

    NOTE(review): ``kwargs`` is accepted but never used — confirm intent.
    """
    c = gf.c.mzi_phase_shifter_top_heater_metal(delta_length=40)
    c = gf.routing.add_fiber_single(c)
    c = c.rotate(-90)
    c = gf.routing.add_electrical_pads_top_dc(c)
    gf.add_labels.add_labels_to_ports_electrical(component=c, prefix=f"elec-{c.name}-")
    return c
def mzi_te_pads3(**kwargs) -> Component:
    """Same as mzi_te_pads1 but with shortest-path pad routing and DC labels.

    NOTE(review): ``kwargs`` is accepted but never used — confirm intent.
    """
    c = gf.c.mzi_phase_shifter_top_heater_metal(delta_length=40)
    c = gf.routing.add_fiber_single(c)
    c = c.rotate(-90)
    c = gf.routing.add_electrical_pads_shortest(c)
    gf.add_labels.add_labels_to_ports_vertical_dc(component=c, prefix=f"elec-{c.name}-")
    return c
if __name__ == "__main__":
    # c = mzi_te_pads3()
    # c.show(show_ports=True)

    # Build a TM-grating-coupler variant without input labels, then label
    # the electrical, loopback, and vertical-TM ports for the reticle map.
    gc = gf.c.grating_coupler_elliptical_tm()
    c = gf.c.mzi_phase_shifter_top_heater_metal(delta_length=40)
    c = gf.routing.add_fiber_single(
        c, get_input_label_text_function=None, grating_coupler=gc
    )
    c = c.rotate(-90)
    c = gf.routing.add_electrical_pads_top(c)
    gf.add_labels.add_labels_to_ports_electrical(component=c, prefix=f"elec-{c.name}-")
    gf.add_labels.add_labels_to_ports(
        component=c, port_type="loopback", prefix=f"opttm1500-{c.name}-"
    )
    gf.add_labels.add_labels_to_ports(
        component=c, port_type="vertical_tm", prefix=f"opttm1500-{c.name}-"
    )
    c.show(show_ports=True)
| mit | f4781ebce29d535998af7058b3ca4daa | 33.538462 | 88 | 0.658129 | 2.633431 | false | false | false | false |
galaxyproject/cargo-port | bin/gsl.py | 1 | 2648 | #!/usr/bin/env python
from future import standard_library
standard_library.install_aliases()
from builtins import str
import click
import hashlib
from io import StringIO
import logging
import os
import urllib.request, urllib.parse, urllib.error
from cargoport.utils import yield_packages, package_name, PACKAGE_SERVER, get_url
# Module-wide root logger; DEBUG level so download steps are traceable.
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger()
@click.command()
@click.option('--package_id', help='Package ID', required=True)
@click.option('--package_version', help="Package version, downloads all versions if not specified", default=None, required=False)
@click.option('--urls', help="Override default urls.tsv location", default=PACKAGE_SERVER + "urls.tsv")
@click.option('--download_location', default='./',
              help='Location for the downloaded file')
def get(package_id, package_version, urls, download_location):
    """Download a package (optionally one specific version) listed in urls.tsv
    into ``download_location`` and verify its sha256 checksum.
    """
    package_found = False
    log.info("Searching for package: %s in %s", package_id, urls)
    if not os.path.exists(download_location):
        os.makedirs(download_location)
    handle = None
    # `urls` may be a remote URL or a local file path.
    if '://' in urls:
        with urllib.request.urlopen(urls) as uf:
            handle = StringIO(uf.read())
    elif os.path.exists(urls):
        handle = open(urls, 'r')
    else:
        raise Exception("--urls option does not look like a url or a file path")
    for ld in yield_packages(handle):
        # TODO: check platform/architecture, failover to all if available?
        if ld['id'] == package_id.strip() and (package_version is None or ld['version'] == package_version):
            package_found = True
            # I worry about this being unreliable. TODO: add target filename column?
            pkg_name = package_name(ld)
            storage_path = os.path.join(download_location, pkg_name)
            url = get_url(ld)
            urllib.request.urlretrieve(url, storage_path)
            # Close the file after hashing instead of leaking the descriptor.
            with open(storage_path, 'rb') as downloaded:
                download_checksum = hashlib.sha256(downloaded.read()).hexdigest()
            if ld['sha256sum'] != download_checksum:
                # BUG FIX: previously `.format()` was called on log.error()'s
                # return value (None), raising AttributeError on any mismatch.
                log.error('Checksum does not match, something seems to be wrong.\n'
                          '%s\t(expected)\n%s\t(downloaded)',
                          ld['sha256sum'], download_checksum)
            else:
                log.info('Download successful for %s.', pkg_name)
    if not package_found:
        log.warning('Package (%s) could not be found in this server.', package_id)
if __name__ == '__main__':
    get()  # click parses CLI arguments and invokes the command
| mit | 9922005f610aa90b7325319d76017ed3 | 42.409836 | 129 | 0.64426 | 3.975976 | false | false | false | false |
ssato/python-anyconfig | src/anyconfig/backend/yaml/ruamel_yaml.py | 1 | 4376 | #
# Copyright (C) 2011 - 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
"""A backend module to load and dump YAML data files using rumael.yaml.
- Format to support: YAML, http://yaml.org
- Requirement: ruamel.yaml, https://bitbucket.org/ruamel/yaml
- Development Status :: 4 - Beta
- Limitations:
- Multi-documents YAML stream load and dump are not supported.
- Special options:
- All keyword options of yaml.safe_load, yaml.load, yaml.safe_dump and
yaml.dump should work.
- Use 'ac_safe' boolean keyword option if you prefer to call yaml.safe_load
and yaml.safe_dump instead of yaml.load and yaml.dump. Please note that
this option conflicts with 'ac_dict' option and these options cannot be
used at the same time.
- Also, you can give keyword options for ruamel.yaml.YAML.__init__ such like
typ and pure, and can give some members of ruamel.yaml.YAML instance to
control the behaviors such like default_flow_style and allow_duplicate_keys
as keyword options to load and dump functions.
- See also: https://yaml.readthedocs.io
Changelog:
.. versionchanged:: 0.9.8
- Split from the common yaml backend and start to support ruamel.yaml
specific features.
"""
import ruamel.yaml as ryaml
from ...utils import filter_options
from .. import base
from . import common
# Fail fast at import time when the installed ruamel.yaml predates the
# YAML-class API this backend relies on.
try:
    ryaml.YAML  # flake8: noqa
except AttributeError as exc:
    raise ImportError('ruamel.yaml may be too old to use!') from exc
# Keyword arguments accepted by ruamel.yaml.YAML.__init__.
_YAML_INIT_KWARGS = ['typ', 'pure', 'plug_ins']  # kwargs for ruamel.yaml.YAML
# Public members of a ruamel.yaml.YAML instance that callers may set via
# keyword options (e.g. default_flow_style, allow_duplicate_keys); applied
# with setattr() in yml_fnc().
_YAML_INSTANCE_MEMBERS = ['allow_duplicate_keys', 'allow_unicode',
                          'block_seq_indent', 'canonical', 'composer',
                          'constructor', 'default_flow_style', 'default_style',
                          'dump', 'dump_all', 'emitter', 'encoding',
                          'explicit_end', 'explicit_start',
                          'get_constructor_parser',
                          'get_serializer_representer_emitter', 'indent',
                          'line_break', 'load', 'load_all', 'map',
                          'map_indent', 'official_plug_ins', 'old_indent',
                          'parser', 'prefix_colon', 'preserve_quotes',
                          'reader', 'register_class', 'representer',
                          'resolver', 'scanner', 'seq', 'sequence_dash_offset',
                          'sequence_indent', 'serializer', 'stream', 'tags',
                          'top_level_block_style_scalar_no_indent_error_1_1',
                          'top_level_colon_align', 'version', 'width']
# Every option the Parser advertises for both load and dump.
_YAML_OPTS = _YAML_INIT_KWARGS + _YAML_INSTANCE_MEMBERS
def yml_fnc(fname, *args, **options):
    """Call loading functions for yaml data.

    :param fname:
        "load" or "dump", not checked but it should be OK.
        see also :func:`yml_load` and :func:`yml_dump`
    :param args: [stream] for load or [cnf, stream] for dump
    :param options: keyword args may contain "ac_safe" to load/dump safely
    """
    options = common.filter_from_options("ac_dict", options)
    if 'ac_safe' in options:
        options['typ'] = 'safe'  # Override it.
    # Split options: YAML() constructor kwargs vs. instance members to set.
    iopts = filter_options(_YAML_INIT_KWARGS, options)
    oopts = filter_options(_YAML_INSTANCE_MEMBERS, options)
    yml = ryaml.YAML(**iopts)
    for attr, val in oopts.items():
        setattr(yml, attr, val)  # e.g. yml.preserve_quotes = True
    return getattr(yml, fname)(*args)
def yml_load(stream, container, **options):
    """See :func:`anyconfig.backend.yaml.pyyaml.yml_load`."""
    loaded = yml_fnc('load', stream, **options)
    # An empty YAML document loads as None; normalize to an empty container.
    return container() if loaded is None else loaded
def yml_dump(data, stream, **options):
    """See :func:`anyconfig.backend.yaml.pyyaml.yml_dump`."""
    # .. todo:: Needed?
    # if anyconfig.utils.is_dict_like(data):
    #     if options.get("ac_ordered"):
    #         factory = collections.OrderedDict
    #     else:
    #         factory = dict
    #     data = anyconfig.dicts.convert_to(data, ac_dict=factory)
    return yml_fnc('dump', data, stream, **options)
class Parser(common.Parser):
    """Parser for YAML files, backed by ruamel.yaml."""

    _cid = 'ruamel.yaml'
    # Advertise every YAML() constructor kwarg / instance member as an option.
    _load_opts = _YAML_OPTS
    _dump_opts = _YAML_OPTS

    load_from_stream = base.to_method(yml_load)
    dump_to_stream = base.to_method(yml_dump)
# vim:sw=4:ts=4:et:
| mit | 8f51f2960989ff4af2464efdf5cc87f8 | 34.008 | 79 | 0.617002 | 3.569331 | false | false | false | false |
ssato/python-anyconfig | src/anyconfig/backend/toml.py | 1 | 1048 | #
# Copyright (C) 2015 - 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
# Ref. python -c "import toml; help(toml); ..."
#
r"""A backend module to load and dump TOML files.
- Format to support: TOML, https://github.com/toml-lang/toml
- Requirements: (python) toml module, https://github.com/uiri/toml
- Development Status :: 4 - Beta
- Limitations: None obvious
- Special options:
- toml.load{s,} only accept '_dict' keyword option but it's used already to
pass callable to make a container object.
Changelog:
.. versionadded:: 0.1.0
"""
import toml
from . import base
class Parser(base.StringStreamFnParser):
    """TOML parser.

    Declarative wrapper: all load/dump work is delegated to the `toml`
    package functions bound below via `base.to_method`.
    """

    _cid = 'toml'
    _type = 'toml'
    _extensions = ['toml']
    # NOTE(review): ordering flag consumed by the base class — semantics
    # defined there.
    _ordered = True
    # toml.load{s,}/dump{s,} only accept the '_dict' keyword option.
    _load_opts = _dump_opts = _dict_opts = ['_dict']

    _load_from_string_fn = base.to_method(toml.loads)
    _load_from_stream_fn = base.to_method(toml.load)
    _dump_to_string_fn = base.to_method(toml.dumps)
    _dump_to_stream_fn = base.to_method(toml.dump)
# vim:sw=4:ts=4:et:
| mit | 4fdf12f453b391401283780301eceeed | 24.560976 | 77 | 0.66126 | 2.871233 | false | false | false | false |
ssato/python-anyconfig | src/anyconfig/ioinfo/factory.py | 1 | 2628 | #
# Copyright (C) 2018 - 2021 Satoru SATOH <satoru.satoh @ gmmail.com>
# SPDX-License-Identifier: MIT
#
# pylint: disable=invalid-name
"""ioinfo.main to provide internal APIs used from other modules."""
import pathlib
import typing
from . import constants, datatypes, detectors, utils
def from_path_object(path: pathlib.Path) -> datatypes.IOInfo:
    """Get an IOInfo object made from :class:`pathlib.Path` object ``path``."""
    # Normalize to an absolute path and split out the file extension.
    (abs_path, file_ext) = utils.get_path_and_ext(path)
    return datatypes.IOInfo(
        abs_path, datatypes.IOI_PATH_OBJ, str(abs_path), file_ext
    )
def from_path_str(path: str) -> datatypes.IOInfo:
    """Get an IOInfo object made from a str ``path``."""
    # resolve() makes the path absolute before delegating.
    return from_path_object(pathlib.Path(path).resolve())
def from_io_stream(strm: typing.IO) -> datatypes.IOInfo:
    """Build an IOInfo from an already-open IO stream ``strm``.

    When the stream exposes a ``name`` attribute (file-backed streams do),
    its path and extension are recorded; otherwise both stay empty.
    """
    name: str = getattr(strm, 'name', '')
    if not name:
        abs_path, file_ext = name, ''
    else:
        resolved, file_ext = utils.get_path_and_ext(pathlib.Path(name))
        abs_path = str(resolved)

    return datatypes.IOInfo(
        strm, datatypes.IOI_STREAM, abs_path, file_ext
    )
def make(obj: typing.Any) -> datatypes.IOInfo:
    """Make and return a :class:`datatypes.IOInfo` object from ``obj``.

    Accepts, in priority order: an existing IOInfo (returned as-is),
    a path string, a :class:`pathlib.Path`, or a readable stream.

    :raises ValueError: if ``obj`` matches none of the supported kinds.
    """
    if isinstance(obj, datatypes.IOInfo):
        return obj

    if isinstance(obj, str):
        return from_path_str(obj)

    if isinstance(obj, pathlib.Path):
        return from_path_object(obj)

    # Which is better? isinstance(obj, io.IOBase):
    # Duck-typed stream check: anything with a truthy ``read`` attribute.
    if getattr(obj, 'read', False):
        return from_io_stream(obj)

    raise ValueError(repr(obj))
def make_itr(obj: typing.Any, marker: str = constants.GLOB_MARKER
             ) -> typing.Iterator[datatypes.IOInfo]:
    """Make and yield a series of :class:`datatypes.IOInfo` objects.

    Path inputs are glob-expanded; any other iterable is flattened
    recursively.
    """
    if isinstance(obj, datatypes.IOInfo):
        yield obj
    elif detectors.is_path_str(obj):
        for expanded in utils.expand_from_path(pathlib.Path(obj)):
            yield from_path_object(expanded)
    elif detectors.is_path_obj(obj):
        for expanded in utils.expand_from_path(obj):
            yield from_path_object(expanded)
    elif detectors.is_io_stream(obj):
        yield from_io_stream(obj)
    else:
        # Assume an iterable of inputs; recurse into each element.
        for item in obj:
            yield from make_itr(item, marker=marker)
def makes(obj: typing.Any, marker: str = constants.GLOB_MARKER
          ) -> typing.List[datatypes.IOInfo]:
    """Make and return a list of :class:`datatypes.IOInfo` objects.

    Eager counterpart of :func:`make_itr`.
    """
    return list(make_itr(obj, marker=marker))
# vim:sw=4:ts=4:et:
| mit | 873ee5d77c1c919ced4d011682f4c82b | 29.206897 | 79 | 0.639269 | 3.272727 | false | false | false | false |
hugsy/gef | tests/commands/pattern.py | 1 | 3009 | """
Pattern commands test module
"""
import pytest
from tests.utils import ARCH, GefUnitTestGeneric, _target, gdb_run_cmd, is_64b
class PatternCommand(GefUnitTestGeneric):
    """`pattern` command test module"""

    def test_cmd_pattern_create(self):
        """`pattern create` emits De Bruijn-style patterns for 4- and 8-byte periods."""
        cmd = "pattern create -n 4 32"
        res = gdb_run_cmd(cmd)
        self.assertNoException(res)
        self.assertIn("aaaabaaacaaadaaaeaaaf", res)

        cmd = "pattern create -n 8 32"
        res = gdb_run_cmd(cmd)
        self.assertNoException(res)
        self.assertIn("aaaaaaaabaaaaaaacaaaaaaadaaaaaaa", res)

    @pytest.mark.skipif(ARCH not in ("x86_64", "aarch64", "i686", "armv7l"),
                        reason=f"Skipped for {ARCH}")
    def test_cmd_pattern_search(self):
        """`pattern search` locates pattern offsets in registers and literals."""
        target = _target("pattern")

        # Per-arch frame register and the offsets expected for the four
        # lookups exercised below.
        if ARCH == "aarch64":
            lookup_register = "$x30"
            expected_offsets = (16, 16, 5, 9)
        elif ARCH == "armv7l":
            lookup_register = "$r11"
            expected_offsets = (8, 8, 5, 9)
        elif ARCH == "x86_64":
            lookup_register = "$rbp"
            expected_offsets = (8, 8, 5, 9)
        elif ARCH == "i686":
            lookup_register = "$ebp"
            # expected_offsets = (16, None, 5, 9)
            expected_offsets = (16, 16, 5, 9)
        else:
            raise ValueError("Invalid architecture")

        #0 search a 4-byte pattern read from a register
        cmd = f"pattern search -n 4 {lookup_register}"
        before = ("set args aaaabaaacaaadaaaeaaafaaagaaahaaa", "run")
        res = gdb_run_cmd(cmd, before=before, target=target)
        self.assertNoException(res)
        self.assertIn(f"Found at offset {expected_offsets[0]} (little-endian search) likely", res)

        #1 8-byte register search only makes sense on 64-bit targets
        if is_64b():
            cmd = f"pattern search -n 8 {lookup_register}"
            before = ("set args aaaaaaaabaaaaaaacaaaaaaadaaaaaaa", "run")
            res = gdb_run_cmd(cmd, before=before, target=target)
            self.assertNoException(res)
            self.assertIn(f"Found at offset {expected_offsets[1]} (little-endian search) likely", res)

        #2 search a 4-byte literal substring
        cmd = "pattern search -n 4 caaa"
        before = ("set args aaaabaaacaaadaaaeaaafaaagaaahaaa", "run")
        res = gdb_run_cmd(cmd, before=before, target=target)
        self.assertNoException(res)
        self.assertIn(f"Found at offset {expected_offsets[2]} (little-endian search) likely", res)

        #3 8-byte literal search, 64-bit only
        if is_64b():
            cmd = "pattern search -n 8 caaaaaaa"
            before = ("set args aaaaaaaabaaaaaaacaaaaaaadaaaaaaa", "run")
            res = gdb_run_cmd(cmd, before=before, target=target)
            self.assertNoException(res)
            self.assertIn(f"Found at offset {expected_offsets[3]} (little-endian search) likely", res)

        #4 a value absent from the pattern must report "not found"
        cmd = "pattern search -n 4 JUNK"
        before = ("set args aaaabaaacaaadaaaeaaafaaagaaahaaa", "run")
        res = gdb_run_cmd(cmd, before=before, target=target)
        self.assertNoException(res)
        # Fixed: was f"not found" — an f-string with no placeholders (F541).
        self.assertIn("not found", res)
| mit | 261b784212b16c119b6b8831994fcd53 | 36.6125 | 102 | 0.589232 | 3.50291 | false | true | false | false |
istresearch/traptor | traptor/rule_set.py | 1 | 8448 | import six
class RuleSet(object):
    """
    Rule sets provide a convenient data structure to work with lists of
    collection rules.

    The rule set class enables operations using set operations (union,
    difference, intersection) and simple syntax for addition and subtraction
    using the +/- operators.

    Rule sets also maintain an internal index of rules by their value, to help
    optimize the assignment and distribution of collection rules.

    Invariant: ``rules``, ``rule_ids`` and ``rules_by_value`` are kept in
    sync by append()/remove(); all other operations go through those two.
    """

    # Dictionary of rule_id -> rule
    rules = None

    # Unique set of rule IDs
    rule_ids = None

    # Dictionary of rule value -> dictionary rule_id -> rule
    rules_by_value = None

    def __init__(self):
        # Instance-level containers shadow the class-level placeholders above.
        self.rules = dict()
        self.rule_ids = set()
        self.rules_by_value = dict()

    def append(self, rule):
        """
        Adds a collection rule to this rule set.

        :param rule: Collection rule
        :type rule: dict[str, object]
        :raises ValueError: if the rule lacks an id or a value
        """
        rule_id = rule.get('rule_id')
        rule_value = self.get_normalized_value(rule)

        if rule_id is None or rule_value is None:
            raise ValueError("The provided Cooper rule is missing rule id or value.")

        # Update all three indexes together to preserve the class invariant.
        self.rules[rule_id] = rule
        self.rule_ids.add(rule_id)

        if rule_value not in self.rules_by_value:
            self.rules_by_value[rule_value] = dict()

        self.rules_by_value[rule_value][rule_id] = rule

    def remove(self, rule_id):
        """
        Removes a collection rule from this rule set.

        :param rule_id: ID of a collection rule
        :type rule_id: str
        :return: The rule that was removed (None if it was not present)
        :rtype: dict[str, object]
        """
        rule = None
        rule_value = None

        if rule_id in self.rules:
            rule = self.rules[rule_id]
            rule_value = self.get_normalized_value(self.rules[rule_id])
            del self.rules[rule_id]

        if rule_id in self.rule_ids:
            self.rule_ids.remove(rule_id)

        if rule_value is not None and rule_value in self.rules_by_value:
            if rule_id in self.rules_by_value[rule_value]:
                del self.rules_by_value[rule_value][rule_id]

            # Drop empty value buckets so rules_by_value stays compact.
            if len(self.rules_by_value[rule_value]) == 0:
                del self.rules_by_value[rule_value]

        return rule

    def union(self, other):
        """
        Performs a union (addition) set operation, returning a new RuleSet
        instance containing the combined unique set of elements from both
        RuleSets.

        :param other: Another RuleSet instance
        :type other: RuleSet
        :return: A new RuleSet containing elements of both RuleSets
        :rtype: RuleSet
        """
        rs = RuleSet()

        for rule in six.itervalues(self.rules):
            rs.append(rule)

        # On duplicate rule_ids, the other set's rule wins (appended last).
        for rule in six.itervalues(other.rules):
            rs.append(rule)

        return rs

    def difference(self, other):
        """
        Performs a difference (subtraction) set operation, returning a new
        RuleSet containing the elements of this RuleSet which are not in the
        other RuleSet.

        :param other: Another RuleSet instance
        :type other: RuleSet
        :return: A new RuleSet
        :rtype: RuleSet
        """
        rs = RuleSet()

        rule_ids = self.rule_ids - other.rule_ids

        for rule_id in rule_ids:
            rs.append(self.rules[rule_id])

        return rs

    def intersection(self, other):
        """
        Performs an intersection set operation, returning a new RuleSet that
        contains the unique set of elements that both RuleSets have in common.

        :param other: Another RuleSet instance
        :type other: RuleSet
        :return: A new RuleSet
        :rtype: RuleSet
        """
        # Set-algebra identity: A ∩ B == (A ∪ B) − ((A − B) ∪ (B − A)).
        return (self + other) - ((self - other) + (other - self))

    def add_local(self, other):
        """
        Adds the elements of the other set to this set, updating this RuleSet
        instance to contain the combined unique set of elements from both
        RuleSets.

        :param other: Another RuleSet instance
        :type other: RuleSet
        """
        for rule in other:
            self.append(rule)

    def subtract_local(self, other):
        """
        Removes the elements of the other set from this set.

        :param other: Another RuleSet instance
        :type other: RuleSet
        """
        for rule in other:
            self.remove(rule.get('rule_id'))

    @staticmethod
    def get_normalized_value(rule):
        """
        Normalize the rule value to ensure correct assignment.

        Hashtag rules get a '#' prefixed to each bare term; the whole value
        is lower-cased so equivalent rules share one index bucket.

        :param rule: A rule dictionary.
        :type rule: dict
        :return: The normalized value of the rule (None if the rule has none).
        :rtype: str
        """
        value = rule.get('value')

        if value is not None:
            if 'orig_type' in rule and rule['orig_type'] is not None:
                rule_type = rule['orig_type']

                # For hastag rules ensure each term starts with '#' to
                # distinguish from keyword and prevent over-collection
                if rule_type == 'hashtag':
                    tokens = []
                    for token in value.split(' '):
                        if token:
                            if not token.startswith('#'):
                                tokens.append('#' + token)
                            else:
                                tokens.append(token)

                    value = ' '.join(tokens)

            # Normalizing the rule value to lower case
            value = value.lower()

        return value

    def __add__(self, other):
        """
        Operator (+) implementation to perform a union operation.
        :param other:
        :return:
        """
        return self.union(other)

    def __sub__(self, other):
        """
        Operator (-) implementation to perform a difference operation.
        :param other:
        :return:
        """
        return self.difference(other)

    def __contains__(self, item):
        """
        Tests if a given rule exists in this RuleSet (membership is by
        rule_id only, not by value).

        :param item: A collection rule
        :type item: dict[str, object]
        :return: True if the rule is a member of this set, otherwise False.
        """
        if item is not None and isinstance(item, dict) and 'rule_id' in item:
            return item.get('rule_id') in self.rule_ids

        return False

    def __len__(self):
        """
        Returns the count of collection rules in this RuleSet.
        :return:
        """
        return len(self.rule_ids)

    def __iter__(self):
        """
        Returns an iterator of the collection rules in the RuleSet.
        :return:
        """
        return six.itervalues(self.rules)

    def __eq__(self, other):
        """
        Tests for equality between two RuleSet objects. Equality is defined in
        this context as having the same set of rule IDs.
        :param other:
        :return:
        """
        return other is not None and isinstance(other, RuleSet) and \
            self.rule_ids == other.rule_ids

    def __ne__(self, other):
        """
        Tests for inequality between two RuleSet objects. Equality is defined in
        this context as having the same set of rule IDs.
        :param other:
        :return:
        """
        return other is None or not isinstance(other, RuleSet) or \
            self.rule_ids != other.rule_ids

    def __repr__(self):
        return "(rules: {}, values: {})".format(len(self.rule_ids), len(self.rules_by_value))
class ReadOnlyRuleSet(RuleSet):
    """
    A protected RuleSet implementation that disallows addition or removal of
    rules.
    """

    def __init__(self, rule_set):
        """
        The provided RuleSet will continue to back this read-only instance, and
        any changes made to the original RuleSet will be portrayed here.

        :param rule_set: A RuleSet instance to back this
        :type rule_set: RuleSet
        """
        super(ReadOnlyRuleSet, self).__init__()

        # Share (not copy) the backing set's containers so this view tracks
        # future mutations made through the original RuleSet.
        self.rules = rule_set.rules
        self.rule_ids = rule_set.rule_ids
        self.rules_by_value = rule_set.rules_by_value

    def append(self, rule):
        # Mutation is disabled on the read-only view.
        raise NotImplementedError("Appending rules is not permitted on a ReadOnlyRuleSet.")

    def remove(self, rule_id):
        # Mutation is disabled on the read-only view.
        raise NotImplementedError("Removing rules is not permitted on a ReadOnlyRuleSet.")
| mit | 9616a3151ccd3987943b5b62419efb07 | 29.064057 | 93 | 0.572917 | 4.377202 | false | false | false | false |
javipalanca/spade | examples/join.py | 1 | 1027 | import asyncio
import getpass
from spade import quit_spade
from spade.agent import Agent
from spade.behaviour import OneShotBehaviour
class DummyAgent(Agent):
    """Example agent demonstrating behaviour join() from async and sync code."""

    class LongBehav(OneShotBehaviour):
        # Simulates a long-running task (5 s) before finishing.
        async def run(self):
            await asyncio.sleep(5)
            print("Long Behaviour has finished")

    class WaitingBehav(OneShotBehaviour):
        # Blocks until LongBehav completes, then reports.
        async def run(self):
            await self.agent.behav.join()  # this join must be awaited
            print("Waiting Behaviour has finished")

    async def setup(self):
        """Register both behaviours when the agent starts."""
        print("Agent starting . . .")
        self.behav = self.LongBehav()
        self.add_behaviour(self.behav)
        self.behav2 = self.WaitingBehav()
        self.add_behaviour(self.behav2)
if __name__ == "__main__":
    # Prompt for XMPP credentials interactively.
    jid = input("JID> ")
    passwd = getpass.getpass()

    dummy = DummyAgent(jid, passwd)
    future = dummy.start()
    # Block until the agent's setup() has completed.
    future.result()

    dummy.behav2.join()  # this join must not be awaited
    print("Stopping agent.")
    dummy.stop()

    quit_spade()
| mit | ed01e1265a3850854a469584beaab414 | 23.452381 | 70 | 0.635833 | 3.481356 | false | false | false | false |
istresearch/traptor | traptor/traptor_birdy.py | 1 | 2333 |
from .birdy.twitter import StreamClient, StreamResponse
class TraptorStreamResponse(StreamResponse):
    """
    Adds proper connection closing.
    """

    # Underlying HTTP response object; held so close() can terminate it.
    _response = None

    def __init__(self, response, request_method, json_object_hook):
        super(TraptorStreamResponse, self).__init__(response, request_method, json_object_hook)
        self._response = response

    def stream(self):
        # Expose the raw stream iterator without any parsing layer.
        return self._stream_iter()

    def close(self):
        """
        Close the stream we have with Twitter.
        :return:
        """
        if self._response is not None:
            self._response.close()
class TraptorBirdyClient(StreamClient):
    """
    Subclass the Birdy StreamClient to add socket timeout configuration, proper
    connection closing, and remove the built-in parsing.
    """

    # Default connection timeout in seconds; overridable via the constructor.
    _connect_timeout = 30

    def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, connect_timeout=None):
        super(TraptorBirdyClient, self).__init__(consumer_key, consumer_secret,
                                                 access_token, access_token_secret)
        if connect_timeout is not None:
            self._connect_timeout = connect_timeout

    @staticmethod
    def get_json_object_hook(data):
        """
        Vanilla pass-through.

        Overrides the parent's JSON hook so no parsing/wrapping is applied.

        :param data:
        :return: untouched
        """
        return data

    def make_api_call(self, method, url, **request_kwargs):
        """
        Twitter recommends a socket timeout of 90 seconds, giving them 3
        attempts to deliver keep-alive messages at 30-second intervals.

        :param method:
        :param url:
        :param request_kwargs:
        :return:
        """
        # (connect timeout, read timeout) tuple per the requests API.
        request_kwargs['timeout'] = (self._connect_timeout, 90)

        return self.session.request(method, url, stream=True, **request_kwargs)

    def handle_response(self, method, response):
        """
        We override to return our own TraptorStreamResponse which allows us to
        close and cleanup the connection to Twitter.

        :param method:
        :param response:
        :return:
        """
        if response.status_code == 200:
            return TraptorStreamResponse(response, method, self.get_json_object_hook)

        # Non-200 responses keep the parent's error handling.
        return super(TraptorBirdyClient, self).handle_response(method, response)
| mit | 12ecb511b1e307343b2cf3f1c1189263 | 29.697368 | 111 | 0.620231 | 4.312384 | false | false | false | false |
solvebio/solvebio-python | solvebio/test/test_lookup.py | 1 | 1302 | from __future__ import absolute_import
from .helper import SolveBioTestCase
class LookupTests(SolveBioTestCase):
    """Integration tests for Dataset.lookup() against the test dataset."""

    def setUp(self):
        super(LookupTests, self).setUp()
        # Shared dataset fixture resolved from the test vault path.
        self.dataset = self.client.Object.get_by_full_path(
            self.TEST_DATASET_FULL_PATH)

    def test_lookup_error(self):
        # Check that incorrect lookup results in empty list.
        lookup_one = self.dataset.lookup('test')
        self.assertEqual(lookup_one, [])
        lookup_two = self.dataset.lookup('test', 'nothing')
        self.assertEqual(lookup_two, [])

    def test_lookup_correct(self):
        # Check that lookup with specific sbid is correct.
        records = list(self.dataset.query(limit=2))
        record_one = records[0]
        record_two = records[1]
        sbid_one = record_one['_id']
        sbid_two = record_two['_id']
        lookup_one = self.dataset.lookup(sbid_one)
        self.assertEqual(lookup_one[0], record_one)
        lookup_two = self.dataset.lookup(sbid_two)
        self.assertEqual(lookup_two[0], record_two)

        # Check that combining sbids returns list of correct results.
        # Results are expected in the same order as the requested ids.
        joint_lookup = self.dataset.lookup(sbid_one, sbid_two)
        self.assertEqual(joint_lookup[0], record_one)
        self.assertEqual(joint_lookup[1], record_two)
| mit | 742db7d6dec96b9d2f25b507913c4beb | 33.263158 | 69 | 0.643625 | 3.677966 | false | true | false | false |
solvebio/solvebio-python | solvebio/global_search.py | 1 | 7763 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .client import client
from .resource import Object
from .resource import Vault
from .query import QueryBase
from .query import Query
from .query import Filter
import logging
logger = logging.getLogger('solvebio')
class GlobalSearch(Query):
    """
    GlobalSearch acts as a request wrapper that generates a request from Filter objects,
    and can iterate through streaming result sets.
    """

    def __init__(
            self,
            query=None,
            filters=None,
            entities=None,
            entities_match='any',
            vault_scope='all',
            ordering=None,
            limit=float('inf'),
            page_size=QueryBase.DEFAULT_PAGE_SIZE,
            result_class=dict,
            debug=False,
            raw_results=False,
            **kwargs):
        """
        Creates a new Query object.

        :Parameters:
          - `query` (optional): An optional query string (advanced search).
          - `filters` (optional): Filter or List of filter objects.
          - `entities` (optional): List of entity tuples to filter on (entity type, entity).
          - `entities_match` (optional): Can be 'all' or 'any' (match any provided entity).
          - `vault_scope` (optional): Can be 'all' or 'access'.
          - `ordering` (optional): List of fields to order the results by.
          - `limit` (optional): Maximum number of query results to return.
          - `page_size` (optional): Number of results to fetch per query page.
          - `result_class` (optional): Class of object returned by query.
          - `debug` (optional): Sends debug information to the API.
          - `raw_results` (optional): Whether to use raw API response or to cast logical
            objects to Vault and Object instances.
        """
        # Base Query is initialized without a dataset; search has its own URL.
        super(GlobalSearch, self).__init__(None)
        self._data_url = '/v2/search'
        self._query = query
        self._entities = entities
        self._entities_match = entities_match
        self._vault_scope = vault_scope
        self._ordering = ordering
        self._result_class = result_class
        self._debug = debug
        self._raw_results = raw_results
        self._error = None

        # Accept either a single Filter or a list of Filters.
        if filters:
            if isinstance(filters, Filter):
                filters = [filters]
        else:
            filters = []
        self._filters = filters

        # init response and cursor
        self._response = None

        # Limit defines the total number of results that will be returned
        # from a query involving 1 or more pagination requests.
        self._limit = limit

        # Page size/offset are the low level API limit and offset params.
        self._page_size = int(page_size)

        # Page offset can only be set by execute(). It is always set to the
        # current absolute offset contained in the buffer.
        self._page_offset = None

        # slice is set when the Query is being sliced.
        # In this case, __iter__() and next() will not
        # reset the page_offset to 0 before iterating.
        self._slice = None

        # parameter error checking
        if self._limit < 0:
            raise Exception('\'limit\' parameter must be >= 0')

        if not 0 < self._page_size <= self.MAX_PAGE_SIZE:
            raise Exception('\'page_size\' parameter must be in '
                            'range [1, {}]'.format(self.MAX_PAGE_SIZE))

        # Set up the SolveClient
        # (kwargs overrides pre-set, which overrides global)
        self._client = kwargs.get('client') or self._client or client

    def _clone(self, filters=None, entities=None, limit=None):
        # Copy-on-extend helper used by the fluent API (entity(), etc.).
        new = self.__class__(query=self._query,
                             limit=self._limit,
                             entities=self._entities,
                             ordering=self._ordering,
                             page_size=self._page_size,
                             result_class=self._result_class,
                             vault_scope=self._vault_scope,
                             entities_match=self._entities_match,
                             debug=self._debug,
                             client=self._client)
        new._filters += self._filters

        if entities:
            new._entities = entities

        if filters:
            new._filters += filters

        if limit is not None:
            new._limit = limit

        return new

    def __len__(self):
        """
        Returns the total number of results returned in a query. It is the
        number of items you can iterate over.

        In contrast to count(), the result does take into account any limit
        given. In SQL it is like:

            SELECT COUNT(*) FROM (
                 SELECT * FROM <table> [WHERE condition] [LIMIT number]
            )
        """
        return super(GlobalSearch, self).__len__()

    def _build_query(self, **kwargs):
        # Assemble the JSON body for the /v2/search request from local state.
        q = {}

        if self._query:
            q['query'] = self._query

        if self._filters:
            filters = self._process_filters(self._filters)
            # Multiple top-level filters are implicitly AND-ed together.
            if len(filters) > 1:
                q['filters'] = [{'and': filters}]
            else:
                q['filters'] = filters

        if self._entities is not None:
            q['entities'] = self._entities

        if self._ordering is not None:
            q['ordering'] = self._ordering

        if self._vault_scope is not None:
            q['vault_scope'] = self._vault_scope

        if self._entities_match is not None:
            q['entities_match'] = self._entities_match

        if self._debug:
            q['debug'] = 'True'

        # Add or modify query parameters
        # (used by BatchQuery and facets)
        q.update(**kwargs)

        return q

    def execute(self, offset=0, **query):
        def _process_result(result):
            # Internally the client uses object_type, not type
            result['object_type'] = result['type']
            if result['object_type'] == 'vault':
                return Vault.construct_from(result)
            else:
                return Object.construct_from(result)

        # Call superclass method execute
        super(GlobalSearch, self).execute(offset, **query)

        # Cast logical objects from response to Object/Vault instances
        if not self._raw_results:
            self._response['results'] = [_process_result(i) for i in self._response['results']]

    def entity(self, **kwargs):
        """
        Returns GlobalSearch instance with the query args combined with
        existing set with AND.

        kwargs can contain only one entity, entity_type as parameter name and entity as its value.
        If entity is already set for the GlobalSearch, it will be overridden.
        """
        if not kwargs:
            raise AttributeError('Faceting requires at least one field')

        return self._clone(entities=list(kwargs.items()))

    def subjects(self):
        """Returns the list of subjects"""
        # Executes a query to get a full API response which contains subjects list
        gs = self.limit(0)
        gs.execute(include_subjects=True)
        return gs._response.get('subjects')

    def subjects_count(self):
        """Returns the number of subjects"""
        # Executes a query to get a full API response which contains subjects list
        gs = self.limit(0)
        gs.execute()
        return gs._response.get('subjects_count')

    def vaults(self):
        """Returns the list of vaults"""
        # Executes a query to get a full API response which contains vaults list
        gs = self.limit(0)
        gs.execute(include_vaults=True)
        return gs._response.get('vaults')
| mit | b957256d6bcd47fb671f211075f6f08a | 33.502222 | 98 | 0.566662 | 4.569158 | false | false | false | false |
solvebio/solvebio-python | solvebio/resource/solveobject.py | 1 | 3113 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
import sys
from ..client import client
from .util import json
def convert_to_solve_object(resp, **kwargs):
    """Recursively wrap an API response in the matching SolveObject subclass.

    Lists are converted element-wise; plain dicts become the class named by
    their 'class_name' key (falling back to SolveObject); everything else is
    returned untouched.
    """
    from . import types

    _client = kwargs.pop('client', None)

    if isinstance(resp, list):
        return [convert_to_solve_object(item, client=_client) for item in resp]

    if isinstance(resp, dict) and not isinstance(resp, SolveObject):
        data = resp.copy()
        klass_name = data.get('class_name')
        if isinstance(klass_name, six.string_types):
            klass = types.get(klass_name, SolveObject)
        else:
            klass = SolveObject
        return klass.construct_from(data, client=_client)

    return resp
class SolveObject(dict):
    """Base class for all SolveBio API resource objects.

    Behaves as a dict of API fields with attribute-style access; item writes
    are tracked in ``_unsaved_values`` to support partial updates.
    """
    ID_ATTR = 'id'

    # Allows pre-setting a SolveClient
    _client = None

    def __init__(self, id=None, **params):
        super(SolveObject, self).__init__()
        self._client = params.pop('client', self._client or client)

        # store manually updated values for partial updates
        self._unsaved_values = set()

        if id:
            self[self.ID_ATTR] = id

    def __setattr__(self, k, v):
        # Private names go on the instance; everything else is a dict field.
        if k[0] == '_' or k in self.__dict__:
            return super(SolveObject, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        # Private names never fall through to the dict.
        if k[0] == '_':
            raise AttributeError(k)

        try:
            return self[k]
        except KeyError as err:
            # Surface missing fields as AttributeError for attribute access.
            raise AttributeError(*err.args)

    def __setitem__(self, k, v):
        super(SolveObject, self).__setitem__(k, v)
        # Record the key so partial updates know what changed.
        self._unsaved_values.add(k)

    @classmethod
    def construct_from(cls, values, **kwargs):
        """Used to create a new object from an HTTP response"""
        instance = cls(values.get(cls.ID_ATTR), **kwargs)
        instance.refresh_from(values)
        return instance

    def refresh_from(self, values):
        # Replace all fields and reset the unsaved-values tracker.
        self.clear()
        self._unsaved_values = set()

        for k, v in six.iteritems(values):
            # Bypass __setitem__ so refreshed fields are not marked unsaved.
            super(SolveObject, self).__setitem__(
                k, convert_to_solve_object(v, client=self._client))

    def request(self, method, url, **kwargs):
        # Proxy through the bound client and wrap the response.
        response = self._client.request(method, url, **kwargs)
        return convert_to_solve_object(response, client=self._client)

    def __repr__(self):
        if isinstance(self.get('class_name'), six.string_types):
            ident_parts = [self.get('class_name')]
        else:
            ident_parts = [type(self).__name__]

        if isinstance(self.get(self.ID_ATTR), int):
            ident_parts.append(
                '%s=%d' % (self.ID_ATTR, self.get(self.ID_ATTR),))

        _repr = '<%s at %s> JSON: %s' % (
            ' '.join(ident_parts), hex(id(self)), str(self))

        if sys.version_info[0] < 3:
            return _repr.encode('utf-8')

        return _repr

    def __str__(self):
        return json.dumps(self, sort_keys=True, indent=2)

    @property
    def solvebio_id(self):
        # Legacy alias for the primary id attribute.
        return self.id
| mit | 49df853c18c92b717fb6556146f85db9 | 27.559633 | 73 | 0.568583 | 3.723684 | false | false | false | false |
solvebio/solvebio-python | solvebio/utils/tabulate.py | 1 | 20872 | # -*- coding: utf-8 -*-
#
# This file contains code from python-tabulate, modified for SolveBio
#
# Copyright © 2011-2013 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import absolute_import
from six.moves import map
from six.moves import range
from six.moves import zip
from six import string_types
from collections import namedtuple
from platform import python_version_tuple
import re
from .printing import TTY_COLS
# Python 2/3 compatibility aliases: normalize the names used for type
# checks and lockstep zipping across interpreter versions.
if python_version_tuple()[0] < "3":
    from itertools import izip_longest
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = str
else:
    from itertools import zip_longest as izip_longest
    from functools import reduce
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes


__all__ = ["tabulate"]
__version__ = "0.6"


# Layout descriptors: a Line draws a horizontal rule (begin/fill/column
# separator/end); a DataRow frames one row of cell text.
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])

DataRow = namedtuple("DataRow", ["begin", "sep", "end"])

# A TableFormat bundles all rules/rows plus padding and hide lists that
# suppress elements depending on whether headers are present.
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "usecolons",
                                         "with_header_hide",
                                         "without_header_hide"])


# Shared keyword defaults for the simplest formats.
_format_defaults = {"padding": 0,
                    "usecolons": False,
                    "with_header_hide": [],
                    "without_header_hide": []}


# Registry of named output formats selectable via tablefmt=.
_table_formats = {"simple":
                  TableFormat(lineabove=None,
                              linebelowheader=Line("", "-", "  ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "-", "  ", ""),
                              headerrow=DataRow("", "  ", ""),
                              datarow=DataRow("", "  ", ""),
                              padding=0,
                              usecolons=False,
                              with_header_hide=["linebelow"],
                              without_header_hide=[]),
                  "plain":
                  TableFormat(None, None, None, None,
                              DataRow("", "  ", ""), DataRow("", "  ", ""),
                              **_format_defaults),
                  "grid":
                  TableFormat(lineabove=Line("+", "-", "+", "+"),
                              linebelowheader=Line("+", "=", "+", "+"),
                              linebetweenrows=Line("+", "-", "+", "+"),
                              linebelow=Line("+", "-", "+", "+"),
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1,
                              usecolons=False,
                              with_header_hide=[],
                              without_header_hide=["linebelowheader"]),
                  "pipe":
                  TableFormat(lineabove=None,
                              linebelowheader=Line("|", "-", "|", "|"),
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1,
                              usecolons=True,
                              with_header_hide=[],
                              without_header_hide=[]),
                  "orgmode":
                  TableFormat(lineabove=None,
                              linebelowheader=Line("|", "-", "+", "|"),
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1,
                              usecolons=False,
                              with_header_hide=[],
                              without_header_hide=["linebelowheader"])}


_invisible_codes = re.compile(r'\x1b\[\d*m')  # ANSI color codes
def simple_separated_format(separator):
    """
    Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\t") ; \
        tabulate([["foo", 1], ["spam", 23]], \
        tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True

    """
    plain_row = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=None, datarow=plain_row,
                       **_format_defaults)
def _isconvertible(conv, string):
try:
conv(string) # noqa
return True
except (TypeError, ValueError):
return False
def _isnumber(string):
    """Return True if *string* can be parsed as a float.

    Examples: "123.45" -> True, "123" -> True, "spam" -> False.
    """
    # Inlined float-conversion probe (was a call to _isconvertible).
    try:
        float(string)
    except (TypeError, ValueError):
        return False
    return True
def _isint(string):
    """Return True if *string* is an int or a string parseable as one.

    Examples: "123" -> True, "123.45" -> False.
    """
    if type(string) is int:
        return True
    # Only text-like values are probed for int conversion.
    if not isinstance(string, (bytes, str)):
        return False
    try:
        int(string)
    except (TypeError, ValueError):
        return False
    return True
def _type(string, has_invisible=True):
    """
    The least generic type (type(None), int, float, str, unicode).

    ANSI color codes are stripped first (when *has_invisible*) so a
    colored "42" still classifies as an int.
    """
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)

    if string is None:
        return _none_type
    if _isint(string):
        return _int_type
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """
    Symbols after a decimal point, -1 if the string lacks the decimal point.

    Examples: "123.45" -> 2, "1001" -> -1, "eggs" -> -1, "123e45" -> 2.
    """
    # Non-numbers and integers have no fractional part.
    if not _isnumber(string):
        return -1
    if _isint(string):
        return -1

    # Anchor on the decimal point, falling back to the exponent marker.
    pos = string.rfind(".")
    if pos < 0:
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
"""
Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') \
== ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) \
if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""
Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') \
== '\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) \
if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""
Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') \
== ' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) \
if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
    """Remove invisible ANSI color codes from *s*."""
    return re.sub(r'\x1b\[\d*m', "", s)
def _visible_width(s):
    """
    Visible width of a printed string. ANSI color codes are removed.

    Non-string values are measured by the width of their str() form.
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """
    [string] -> [padded_string]: pad every cell of one column to a common
    width according to *alignment* ("right", "center", "decimal", or
    anything else for left).

    >>> list(map(str,_align_column(  \
        ["12.345", "-1234.5", "1.23", "1234.5", \
        "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', \
    ' 1234.5    ', '    1e+234 ', '    1.0e234']

    """
    # NOTE(review): `alignment in "center"` is a substring test, so values
    # like "" or "c" also match — presumably `== "center"` was intended;
    # confirm before changing.
    if alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment in "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment in "decimal":
        # Right-pad each value so all decimal points line up vertically.
        decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    else:
        strings = [s.strip() for s in strings]
        padfn = _padright

    # Measure by visible width when ANSI codes may be present.
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len

    maxwidth = max(max(list(map(width_fn, strings))), minwidth)
    padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more general of two column cell types.

    Generality order: NoneType < int < float < text; unknown types are
    treated as text (the most general).
    """
    rank_of = {_none_type: 0, int: 1, float: 2, _text_type: 4}
    type_for_rank = {0: _none_type, 1: int, 2: float, 4: _text_type}
    winning_rank = max(rank_of.get(type1, 4), rank_of.get(type2, 4))
    return type_for_rank[winning_rank]
def _column_type(strings, has_invisible=True):
    """Return the least generic type every value in the column converts to.

    Starts from ``int`` and widens via ``_more_generic`` (int -> float ->
    text) as cells demand; e.g. ["1", "2.3"] yields float and
    ["1", "four"] yields text.
    """
    cell_types = (_type(cell, has_invisible) for cell in strings)
    return reduce(_more_generic, cell_types, int)
def _format(val, valtype, floatfmt, missingval=""):
    """
    Format a value according to its type.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', \
        '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \
        \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------\
        -------\\n\\u0430\\u0437 \
        2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """
    # Missing values render as *missingval* (empty string by default).
    if val is None:
        return missingval
    # ints and strings pass through str() formatting unchanged.
    if valtype in [int, _binary_type, _text_type]:
        return "{0}".format(val)
    elif valtype is float:
        # Floats honor the caller-supplied format spec (e.g. "g", ".2f").
        return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width):
    """Pad *header* to *width* using its column's alignment.

    "left" and "center" get their matching pad functions; every other
    alignment (including "right" and "decimal") is right-justified.
    """
    pad_for_alignment = {"left": _padright, "center": _padboth}
    padfn = pad_for_alignment.get(alignment, _padleft)
    return padfn(width, header)
def _normalize_tabular_data(tabular_data, headers, sort=True):
    """
    Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * 2D NumPy arrays
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    Returns a ``(rows, headers)`` pair of plain lists.
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict (values is a bound method)
            keys = list(tabular_data.keys())
            # columns have to be transposed; izip_longest pads ragged columns
            rows = list(izip_longest(*list(tabular_data.values())))
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index then
            # it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = list(tabular_data.keys())
            # values matrix doesn't need to be transposed; prepend the index
            vals = tabular_data.values
            names = tabular_data.index
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict "
                             "or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's, as usual, an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if headers == "keys" and len(rows) > 0:  # keys are column indices
            headers = list(map(_text_type, list(range(len(rows[0])))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(headers)
    rows = list(map(list, rows))
    # Optionally sort rows by their first column for stable display order.
    if sort and len(rows) > 1:
        rows = sorted(rows, key=lambda x: x[0])
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
def _build_row(cells, padding, begin, sep, end):
    "Return a string which represents a row of data cells."
    pad = " " * padding
    padded_cells = [pad + cell + pad for cell in cells]
    # SolveBio: we're only displaying Key-Value tuples (dimension of 2).
    # enforce that we don't wrap lines by setting a max
    # limit on row width which is equal to TTY_COLS (see printing)
    rendered_cells = (begin + sep.join(padded_cells) + end).rstrip()
    if len(rendered_cells) > TTY_COLS:
        # Truncate over-wide rows; append " ... " unless the last cell
        # already ends in padding/rule characters (space or dash).
        if not cells[-1].endswith(" ") and not cells[-1].endswith("-"):
            terminating_str = " ... "
        else:
            terminating_str = ""
        rendered_cells = "{0}{1}{2}".format(
            rendered_cells[:TTY_COLS - len(terminating_str) - 1],
            terminating_str, end)
    return rendered_cells
def _build_line(colwidths, padding, begin, fill, sep, end):
    """Render a horizontal rule whose segments match the column widths."""
    segments = [fill * (width + 2 * padding) for width in colwidths]
    return _build_row(segments, 0, begin, sep, end)
def _mediawiki_cell_attrs(row, colaligns):
"Prefix every cell in a row with an HTML alignment attribute."
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
row2 = [alignment[a] + c for c, a in zip(row, colaligns)]
return row2
def _line_segment_with_colons(linefmt, align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
fill = linefmt.hline
w = colwidth
if align in ["right", "decimal"]:
return (fill[0] * (w - 1)) + ":"
elif align == "center":
return ":" + (fill[0] * (w - 2)) + ":"
elif align == "left":
return ":" + (fill[0] * (w - 1))
else:
return fill[0] * w
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Produce a plain-text representation of the table.

    *fmt* is a TableFormat-like object supplying line/row templates;
    the ``*_hide`` sets suppress rules depending on header presence.
    """
    lines = []
    hidden = fmt.with_header_hide if headers else fmt.without_header_hide
    pad = fmt.padding
    # Fall back to the data-row template when no header template exists.
    headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow
    if fmt.lineabove and "lineabove" not in hidden:
        lines.append(_build_line(colwidths, pad, *fmt.lineabove))
    if headers:
        lines.append(_build_row(headers, pad, *headerrow))
    if fmt.linebelowheader and "linebelowheader" not in hidden:
        begin, fill, sep, end = fmt.linebelowheader
        if fmt.usecolons:
            # Colon-annotated rule (pipe format): alignment is encoded
            # into each segment rather than drawn as a plain line.
            segs = [
                _line_segment_with_colons(fmt.linebelowheader, a, w + 2 * pad)
                for w, a in zip(colwidths, colaligns)]
            lines.append(_build_row(segs, 0, begin, sep, end))
        else:
            lines.append(_build_line(colwidths, pad, *fmt.linebelowheader))
    if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in rows[:-1]:
            lines.append(_build_row(row, pad, *fmt.datarow))
            lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows))
        # the last row without a line below
        lines.append(_build_row(rows[-1], pad, *fmt.datarow))
    else:
        for row in rows:
            lines.append(_build_row(row, pad, *fmt.datarow))
    if fmt.linebelow and "linebelow" not in hidden:
        lines.append(_build_line(colwidths, pad, *fmt.linebelow))
    return "\n".join(lines)
def tabulate(tabular_data, headers=(), tablefmt="orgmode",
             floatfmt="g", aligns=(), missingval="", sort=True, is_tsv=False):
    """Render *tabular_data* as a plain-text table.

    :param tabular_data: list of lists, dict of iterables, or DataFrame
        (see ``_normalize_tabular_data``).
    :param headers: column headers, or "keys"/"firstrow" sentinels.
    :param tablefmt: a TableFormat or the name of a registered format.
    :param floatfmt: format spec applied to float cells.
    :param aligns: per-column alignments; derived from column types when empty.
    :param missingval: replacement text for None cells.
    :param sort: sort rows by their first column.
    :param is_tsv: replace tabs inside cells with spaces instead of deleting.
    :return: the formatted table as a single string.

    BUG FIX: the defaults for ``headers`` and ``aligns`` were mutable lists
    (``[]``); they are now tuples, which is behaviorally equivalent for all
    callers and avoids the shared-mutable-default pitfall.
    """
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers,
                                                    sort=sort)
    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(
        ['\t'.join(map(_text_type, headers))] +
        ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len
    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
            for c, ct in zip(cols, coltypes)]
    # align columns
    if not aligns:
        # dynamic alignment by col type
        aligns = ["decimal" if ct in [int, float] else "left"
                  for ct in coltypes]
    minwidths = [width_fn(h) + 2 for h in headers] if headers \
        else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]
    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0]))
                     for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
    else:
        minwidths = [width_fn(c[0]) for c in cols]
    rows = list(zip(*cols))
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["orgmode"])
    # make sure values don't have newlines or tabs in them, except for tsv output_format where
    # we have to add spaces in order to simulate tab separators
    rows = [[str(c).replace('\n', '').replace('\t', ' ' if is_tsv else '').replace('\r', '')
             for c in r] for r in rows]
    return _format_table(tablefmt, headers, rows, minwidths, aligns)
if __name__ == "__main__":
    # Demo: render the same key/value data twice, sorted and unsorted.
    data = [
        ("gene_symbols", ["CPB1"]),
        ("clinical_significance", "other"),
        ("clinical_origin", ["somatic"]),
        ("alternate_alleles", ["T"]), ]
    print(tabulate(data,
                   headers=('Fields', 'Data'),
                   aligns=('right', 'left'), sort=True))
    print(tabulate(data,
                   headers=('Fields', 'Data'),
                   aligns=('right', 'left'), sort=False))
| mit | 28846d259d928e8a00ff9558cbdcd68e | 32.128571 | 94 | 0.544392 | 3.724969 | false | false | false | false |
solvebio/solvebio-python | solvebio/utils/humanize.py | 1 | 2718 | # -*- coding: utf-8 -*-
#
# From Humanize (jmoiron/humanize)
#
# Copyright (c) 2010 Jason Moiron and Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Bits & Bytes related humanization."""
# Suffix tables for the three display conventions supported below.
suffixes = {
    'decimal': ('kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'),
    'binary': ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'),
    'gnu': "KMGTPEZY",
}


def naturalsize(value, binary=False, gnu=False, format='%.1f'):
    """Format a byte count as a human-readable file size (e.g. ``10 kB``).

    Decimal suffixes (kB, MB) and base 10**3 are used by default. With
    ``binary=True``, binary suffixes (KiB, MiB) and base 2**10 are used
    instead. With ``gnu=True``, ``binary`` is ignored and GNU ``ls -sh``
    style single-letter prefixes (K, M) are used with the 2**10 base.
    Non-gnu modes are compatible with jinja2's ``filesizeformat`` filter.
    """
    if gnu:
        chosen_suffixes = suffixes['gnu']
    elif binary:
        chosen_suffixes = suffixes['binary']
    else:
        chosen_suffixes = suffixes['decimal']
    base = 1024 if (gnu or binary) else 1000
    size = float(value)
    # Values below one unit are spelled out ("Bytes" / bare "B" for gnu).
    if not gnu:
        if size == 1:
            return '1 Byte'
        if size < base:
            return '%d Bytes' % size
    elif size < base:
        return '%dB' % size
    # gnu style omits the space between number and suffix.
    template = format + ('%s' if gnu else ' %s')
    for exponent, unit_suffix in enumerate(chosen_suffixes, start=2):
        unit = base ** exponent
        if size < unit:
            return template % (base * size / unit, unit_suffix)
    # Larger than the biggest suffix: fall back to the last unit.
    return template % (base * size / unit, unit_suffix)
| mit | a3088f8ce038cf22041c247cc125e58a | 38.391304 | 78 | 0.651582 | 3.692935 | false | false | false | false |
sigopt/sigopt-python | controller/controller/manage_pods.py | 1 | 5838 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from http import HTTPStatus
from kubernetes.client.exceptions import ApiException as KubernetesApiException
import logging
import signal
import threading
from sigopt.run_context import RunContext
from controller.create_pod import create_run_pod
from controller.event_repeater import EventRepeater
from controller.pod_status import is_pod_active
from controller.refill_pods import RefillExperimentPodsThread
from controller.run_state import RunState
from controller.settings import ExperimentSettings, RunSettings
from controller.watch_pods import WatchPodsThread
def create_run_state(sigopt_settings, pod, k8s_settings):
  """Fetch the SigOpt training run referenced by *pod*'s "run" label and
  wrap it in a RunState tracker for that pod."""
  conn = sigopt_settings.conn
  fetched_run = conn.training_runs(pod.metadata.labels["run"]).fetch()
  return RunState(
    RunContext(conn, fetched_run),
    sigopt_settings,
    k8s_settings,
    pod.metadata.name,
  )
def set_events_on_sigterm(events):
  """Install a SIGTERM handler that logs and sets every event in *events*,
  letting the controller threads shut down cooperatively."""
  def _on_sigterm(signum, frame):
    logging.error("sigterm received")
    for evt in events:
      evt.set()
  signal.signal(signal.SIGTERM, _on_sigterm)
class RunPodsManager:
  """Manages the lifecycle of a single run pod: creates it if missing,
  watches it via a background thread, and blocks until it finishes or a
  stop/sigterm is signalled."""
  def __init__(self, k8s_settings, run_name, run_id, sigopt_settings):
    self.k8s_settings = k8s_settings
    self.run_name = run_name
    self.run_id = run_id
    self.sigopt_settings = sigopt_settings
    # pod name -> RunState; shared (mutated) by the watcher thread.
    self.run_states = dict()
    self.pod_modified_event = threading.Event()
    self.stop_event = threading.Event()
    self.watcher_thread = WatchPodsThread(
      k8s_settings=self.k8s_settings,
      label_selector=f"run-name={self.run_name},type=run",
      run_states=self.run_states,
      pods_modified_event=self.pod_modified_event,
      stop_threads_event=self.stop_event,
    )
    self.logger = logging.getLogger("controller:RunPodsManager")
  @classmethod
  def from_env(cls):
    # Alternate constructor: read all settings from the environment.
    s = RunSettings()
    return cls(
      k8s_settings=s.k8s_settings,
      run_name=s.run_name,
      run_id=s.run_id,
      sigopt_settings=s.sigopt_settings,
    )
  def start(self):
    """Ensure the run pod exists, then wait until it finishes.

    Raises if the watcher thread reported an exception or a SIGTERM was
    received while waiting.
    """
    sigterm_event = threading.Event()
    set_events_on_sigterm([sigterm_event, self.stop_event])
    try:
      # Reuse an existing pod if one is already running for this run name.
      pod = self.k8s_settings.api.read_namespaced_pod(self.run_name, self.k8s_settings.namespace)
      self.logger.info("found existing pod %s", self.run_name)
      run_state = create_run_state(self.sigopt_settings, pod, self.k8s_settings)
    except KubernetesApiException as kae:
      # Anything other than 404 is a real API failure.
      if kae.status != HTTPStatus.NOT_FOUND:
        raise
      # Pod not found: fetch the run and create a fresh pod for it.
      sigopt_conn = self.sigopt_settings.conn
      run = sigopt_conn.training_runs(self.run_id).fetch()
      run_context = RunContext(sigopt_conn, run)
      run_state = RunState(run_context, self.sigopt_settings, self.k8s_settings, self.run_name)
      pod = create_run_pod(
        k8s_settings=self.k8s_settings,
        run_context=run_context,
      )
      self.logger.info("created pod %s", pod.metadata.name)
    self.run_states.update({self.run_name: run_state})
    self.watcher_thread.start()
    try:
      # Poll until the pod finishes or a stop is requested.
      while not self.stop_event.is_set() and not run_state.is_finished():
        try:
          self.stop_event.wait(timeout=1)
        except TimeoutError:
          # NOTE(review): Event.wait returns False on timeout rather than
          # raising TimeoutError, so this handler looks unreachable -- confirm.
          pass
    except KeyboardInterrupt:
      pass
    self.stop_event.set()
    self.watcher_thread.join()
    if self.watcher_thread.exception_occurred.is_set():
      raise Exception("An exception occurred in the watcher thread")
    if sigterm_event.is_set():
      raise Exception("Sigterm received")
class ExperimentPodsManager:
  """Manages all run pods belonging to one experiment: a refiller thread
  keeps the pod pool topped up, a watcher thread tracks pod events, and a
  periodic repeater re-triggers pod management."""
  def __init__(self, k8s_settings, sigopt_settings, experiment_id):
    self.k8s_settings = k8s_settings
    self.sigopt_settings = sigopt_settings
    self.experiment_id = experiment_id
    self.run_label_selector = f"experiment={self.experiment_id},type=run"
    # pod name -> RunState; shared (mutated) by both worker threads.
    self.run_state = dict()
    self.manage_pods_event = threading.Event()
    self.stop_threads_event = threading.Event()
    # Wake the refiller at least every 5 seconds.
    self.management_event_repeater = EventRepeater(5, self.manage_pods_event)
    self.refiller_thread = RefillExperimentPodsThread(
      self.k8s_settings,
      self.sigopt_settings,
      self.experiment_id,
      self.run_state,
      self.manage_pods_event,
      stop_threads_event=self.stop_threads_event,
    )
    self.watcher_thread = WatchPodsThread(
      self.k8s_settings,
      self.run_label_selector,
      self.run_state,
      self.manage_pods_event,
      stop_threads_event=self.stop_threads_event,
    )
  @classmethod
  def from_env(cls):
    # Alternate constructor: read all settings from the environment.
    s = ExperimentSettings()
    return cls(
      k8s_settings=s.k8s_settings,
      sigopt_settings=s.sigopt_settings,
      experiment_id=s.experiment_id,
    )
  def start(self):
    """Seed state from pods that already exist, run the worker threads,
    and block until stop/sigterm; raises on thread failure or sigterm."""
    sigterm_event = threading.Event()
    set_events_on_sigterm([sigterm_event, self.stop_threads_event])
    # Adopt pods that are already active for this experiment.
    self.run_state.update({
      pod.metadata.name: create_run_state(self.sigopt_settings, pod, self.k8s_settings)
      for pod in self.k8s_settings.api.list_namespaced_pod(
        self.k8s_settings.namespace,
        label_selector=self.run_label_selector,
      ).items
      if is_pod_active(pod)
    })
    self.manage_pods_event.set()
    threads = [self.refiller_thread, self.watcher_thread]
    for thread in threads:
      thread.start()
    self.management_event_repeater.start()
    try:
      while not self.stop_threads_event.is_set():
        try:
          self.stop_threads_event.wait(timeout=5)
        except TimeoutError:
          # NOTE(review): Event.wait returns False on timeout rather than
          # raising TimeoutError, so this handler looks unreachable -- confirm.
          pass
    except KeyboardInterrupt:
      pass
    finally:
      # Always tear down the repeater and unblock/join worker threads.
      self.management_event_repeater.cancel()
      self.stop_threads_event.set()
      self.manage_pods_event.set()
      for thread in threads:
        thread.join()
    if any(thread.exception_occurred.is_set() for thread in threads):
      # NOTE(review): "ocurred" typo in this user-visible message.
      raise Exception("An exception ocurred in at least 1 thread")
    if sigterm_event.is_set():
      raise Exception("Sigterm received")
| mit | de6e78866cbe1854d9780a02030dd317 | 33.744048 | 97 | 0.692479 | 3.385731 | false | false | false | false |
sigopt/sigopt-python | test/xgboost/test_experiment_config.py | 1 | 9164 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
import copy
from mock import Mock
import pytest
from sigopt.xgboost.constants import (
DEFAULT_CLASSIFICATION_METRIC,
DEFAULT_EVALS_NAME,
DEFAULT_REGRESSION_METRIC,
PARAMETER_INFORMATION,
SUPPORTED_AUTOBOUND_PARAMS,
)
from sigopt.xgboost.experiment import XGBExperiment
# Baseline, fully-specified experiment config; tests deep-copy and mutate it.
EXPERIMENT_CONFIG_BASE = dict(
  name='Single metric optimization',
  type='offline',
  parameters=[
    dict(
      name='eta',
      type='double',
      bounds={'min': 0.1, 'max': 0.5}
    ),
    dict(
      name='max_depth',
      type='int',
      bounds={'min': 2, 'max': 6}
    ),
    dict(
      name='num_boost_round',
      type='int',
      bounds={'min': 2, 'max': 6}
    )
  ],
  metrics=[
    dict(
      name='accuracy',
      strategy='optimize',
      objective='maximize'
    )
  ],
  parallel_bandwidth=1,
  budget=2
)
# Baseline fixed XGBoost params passed alongside the config.
PARAMS_BASE = {
  'num_class': 3,
  'lambda': 1,
}
def verify_experiment_config_integrity(experiment_config):
  """Assert that *experiment_config* has the shape of a parsed experiment:
  required top-level keys, well-formed parameters, and well-formed metrics."""
  assert isinstance(experiment_config, dict)
  for required_key in ('type', 'parameters', 'metrics', 'budget'):
    assert required_key in experiment_config
  for parameter in experiment_config['parameters']:
    assert 'name' in parameter
    assert 'type' in parameter
    # Numeric parameters need bounds; categorical ones need their values.
    if parameter['type'] in ('int', 'double'):
      assert 'bounds' in parameter
    else:
      assert 'categorical_values' in parameter
  for metric in experiment_config['metrics']:
    for metric_key in ('name', 'strategy', 'objective'):
      assert metric_key in metric
def parse_and_create_aiexperiment_config(experiment_config, params):
  """Build an XGBExperiment around mocked training data and run its metric
  and parameter parsing, returning the experiment for inspection."""
  experiment = XGBExperiment(
    experiment_config,
    Mock(),  # d_train
    Mock(),  # evals
    params,
    None,  # num_boost_round
    None,  # run_options
    10,  # early_stopping_rounds
  )
  experiment.parse_and_create_metrics()
  experiment.parse_and_create_parameters()
  return experiment
class TestExperimentConfig:
  """Exercises XGBExperiment config parsing: search-space inference,
  metric autodetection, and rejection of malformed/conflicting configs."""
  def verify_integrity(self, experiment_config, params):
    # Parse the config through XGBExperiment, then assert the parsed
    # structure is well-formed.
    xgb_experiment = parse_and_create_aiexperiment_config(experiment_config, params)
    verify_experiment_config_integrity(xgb_experiment.experiment_config_parsed)
  def test_base(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    self.verify_integrity(experiment_config, params)
  def test_config_no_search_space(self):
    # The search space may be omitted entirely and should be inferred.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    del experiment_config['parameters']
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_name_only(self):
    # Parameters given by name only get types/bounds filled in.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    for parameter in experiment_config['parameters']:
      del parameter['type']
      del parameter['bounds']
    self.verify_integrity(experiment_config, params)
  def test_config_detect_log_transformation(self):
    # 'eta' is known to be log-scaled, so its auto-filled parameter
    # should carry a log transformation.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'] = [dict(name='eta')]
    xgb_experiment = parse_and_create_aiexperiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['parameters'][0]['transformation'] == 'log'
  def test_config_search_space_mixed(self):
    # A mix of fully- and name-only-specified parameters is accepted.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    del experiment_config['parameters'][2]['type']
    del experiment_config['parameters'][2]['bounds']
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_wrong_type(self):
    # Declaring 'eta' as int contradicts its known type.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'][0]['type'] = 'int'
    del experiment_config['parameters'][0]['bounds']
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_search_space_no_type(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    del experiment_config['parameters'][0]['type']
    del experiment_config['parameters'][1]['type']
    del experiment_config['parameters'][2]['type']
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_categories_no_type(self):
    # categorical_values alone implies a categorical parameter.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='tree_method',
        categorical_values=['auto', 'exact', 'hist', 'gpu_hist'],
      )
    )
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_no_categorical_values(self):
    # A categorical parameter without its values is rejected.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='tree_method',
        type='categorical',
      )
    )
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_search_space_wrong_categories(self):
    # An invalid category for a known XGBoost parameter is rejected.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='tree_method',
        type='categorical',
        categorical_values=['auto', 'exact', 'hist', 'gpu_hist', 'WrongCategory'],
      )
    )
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_search_space_fake_categories(self):
    # Unknown parameter names are passed through without category checks.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='foo',
        type='categorical',
        categorical_values=['auto', 'exact', 'hist', 'gpu_hist', 'WrongCategory'],
      )
    )
    self.verify_integrity(experiment_config, params)
  def test_config_no_supported_bounds(self):
    # 'max_leaves' has no auto-bound support, so name-only fails.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['parameters'].append(dict(name='max_leaves'))
    params = copy.deepcopy(PARAMS_BASE)
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_autodetect_metric_from_objective(self):
    # When no metrics are given, the default metric follows the XGBoost
    # objective (classification vs regression).
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    del experiment_config['metrics']
    params = copy.deepcopy(PARAMS_BASE)
    params['objective'] = 'binary:logistic'
    xgb_experiment = parse_and_create_aiexperiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['metrics'][0]['name'] == '-'.join(
      (DEFAULT_EVALS_NAME, DEFAULT_CLASSIFICATION_METRIC)
    )
    params['objective'] = 'multi:softmax'
    xgb_experiment = parse_and_create_aiexperiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['metrics'][0]['name'] == '-'.join(
      (DEFAULT_EVALS_NAME, DEFAULT_CLASSIFICATION_METRIC)
    )
    params['objective'] = 'reg:squarederror'
    xgb_experiment = parse_and_create_aiexperiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['metrics'][0]['name'] == '-'.join(
      (DEFAULT_EVALS_NAME, DEFAULT_REGRESSION_METRIC)
    )
  def test_config_metric_string_only(self):
    # Metrics may be given as a bare metric-name string.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['metrics'] = 'accuracy'
    self.verify_integrity(experiment_config, params)
  def test_config_metric_list(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['metrics'].append(dict(
      name='f1',
      strategy='store',
      objective='maximize'
    ))
    params = copy.deepcopy(PARAMS_BASE)
    self.verify_integrity(experiment_config, params)
  def test_config_param_defined_twice(self):
    # A name cannot appear both in the search space and in fixed params.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    params['eta'] = 0.1
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_num_boost_round_defined_twice(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    params['num_boost_round'] = 10
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_wrong_metric_string(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['metrics'] = 'NOT_A_METRIC_SUPPORTED'
    params = copy.deepcopy(PARAMS_BASE)
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_wrong_metric_list(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['metrics'][0]['name'] = 'NOT_A_METRIC_SUPPORTED'
    params = copy.deepcopy(PARAMS_BASE)
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
| mit | d343d515070d5d3130c161251b9562d2 | 32.937037 | 94 | 0.699007 | 3.744585 | false | true | false | false |
sigopt/sigopt-python | controller/controller/watch_pods.py | 1 | 1523 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from kubernetes import watch
from controller.thread import ControllerThread
from controller.pod_status import is_pod_finished
from controller.k8s_constants import K8sEvent
class WatchPodsThread(ControllerThread):
  """Background thread that streams k8s pod events matching a label
  selector and forwards them to the corresponding RunState objects,
  pruning finished/deleted pods from the shared *run_states* dict."""
  def __init__(self, *args, stop_threads_event, **kwargs):
    super().__init__(target=self.watch_pods, stop_threads_event=stop_threads_event, args=args, kwargs=kwargs)
  def watch_pods(self, k8s_settings, label_selector, run_states, pods_modified_event):
    logger = self.logger
    logger.info("starting pod watcher loop")
    watcher = watch.Watch()
    while not self.stop_threads_event.is_set():
      logger.debug("restarting watch stream")
      # The 5-second stream timeout bounds how long a stop request can go
      # unnoticed between events.
      for event in watcher.stream(
        k8s_settings.api.list_namespaced_pod,
        k8s_settings.namespace,
        label_selector=label_selector,
        timeout_seconds=5,
      ):
        if self.stop_threads_event.is_set():
          break
        pod = event["object"]
        pod_name = pod.metadata.name
        event_type = event["type"]
        logger.debug("event %s, pod %s", event_type, pod_name)
        try:
          run_state = run_states[pod_name]
        except KeyError:
          # Pod not tracked (e.g. already pruned); ignore its events.
          continue
        run_state.process_pod_event(event)
        # Stop tracking pods that were deleted or reached a terminal phase.
        if event_type == K8sEvent.DELETED or is_pod_finished(pod):
          del run_states[pod_name]
          logger.info("pod removed %s", pod_name)
        # Wake any thread waiting for pod-state changes.
        pods_modified_event.set()
    logger.info("exited pod watcher loop")
| mit | 72c738336db46a3b876f32a0b2497c03 | 36.121951 | 109 | 0.661629 | 3.615202 | false | false | false | false |
sigopt/sigopt-python | controller/controller/run_state.py | 1 | 3210 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
import collections
import logging
from sigopt.exception import ApiException
from sigopt.run_context import RunContext
from controller.k8s_constants import K8sPhase
from controller.pod_status import is_pod_phase_active, is_pod_phase_finished
# The slice of pod status the controller cares about.
PodState = collections.namedtuple("PodState", ["phase", "termination_info"])


def get_relevant_state_from_pod_event(event):
  """Distill a k8s pod watch event into a PodState.

  termination_info is a ``(reason, exit_code)`` pair taken from the first
  container's terminated state, or None if no container has terminated.
  """
  pod = event["object"]
  termination = None
  statuses = pod.status.container_statuses
  if statuses:
    terminated = statuses[0].state.terminated
    if terminated:
      termination = (terminated.reason, terminated.exit_code)
  return PodState(phase=pod.status.phase, termination_info=termination)
# NOTE(taylor): this class maintains a local state of the run pod
# and updates the SigOpt API when there are changes
class RunState:
  @classmethod
  def create_from_pod(cls, sigopt_settings, k8s_settings, pod):
    # Alternate constructor: look up the SigOpt run via the pod's "run" label.
    run_id = pod.metadata.labels["run"]
    sigopt_conn = sigopt_settings.conn
    run = sigopt_conn.training_runs(run_id).fetch()
    run_context = RunContext(sigopt_conn, run)
    return cls(run_context, sigopt_settings, k8s_settings, pod.metadata.name)
  def __init__(self, run_context, sigopt_settings, k8s_settings, pod_name):
    # Last PodState observed; None until the first event arrives.
    self.state = None
    self.run_context = run_context
    self.sigopt_settings = sigopt_settings
    self.k8s_settings = k8s_settings
    self.pod_name = pod_name
    self.logger = logging.getLogger("controller:RunState")
  def get_phase(self):
    # A pod with no observed state yet is treated as PENDING.
    phase = K8sPhase.PENDING
    if self.state:
      phase = self.state.phase
    return phase
  def is_active(self):
    return is_pod_phase_active(self.get_phase())
  def is_finished(self):
    return is_pod_phase_finished(self.get_phase())
  def process_pod_event(self, event):
    """Fold a k8s watch event into the local state, pushing changes upstream."""
    new_state = get_relevant_state_from_pod_event(event)
    self.update_state(new_state)
  def update_state(self, new_state):
    # Order matters: record termination details before ending the run on
    # a phase change.
    self.maybe_update_termination_info(new_state.termination_info)
    self.maybe_update_phase(new_state.phase)
    self.state = new_state
  def update_run_logs(self):
    # Upload pod logs to the run only when log collection is enabled.
    if self.sigopt_settings.log_collection_enabled:
      logs = self.k8s_settings.api.read_namespaced_pod_log(self.pod_name, self.k8s_settings.namespace)
      self.run_context.set_logs({"all": logs})
  def maybe_update_phase(self, new_phase):
    # Only act on actual phase transitions (or the first observation).
    if not self.state or new_phase != self.state.phase:
      self.run_context.log_metadata("pod_phase", new_phase)
      if is_pod_phase_finished(new_phase):
        self.update_run_logs()
        try:
          self.run_context.end(exception=None if new_phase == K8sPhase.SUCCEEDED else "PodFailed")
        except ApiException:
          # Best effort: the run may already be ended on the API side.
          pass
  def maybe_update_termination_info(self, new_termination_info):
    # Record the container's termination reason/exit code once it appears.
    if not self.state or new_termination_info != self.state.termination_info:
      if new_termination_info:
        termination_reason, exit_code = new_termination_info
        self.run_context.log_metadata("termination_reason", termination_reason)
        self.run_context.log_metadata("exit_code", exit_code)
| mit | be8096064121d3de444e55ce923fb4d6 | 33.880435 | 102 | 0.715799 | 3.439443 | false | false | false | false |
sigopt/sigopt-python | test/runs/test_local_run_context.py | 1 | 2035 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from sigopt.local_run_context import LocalRunContext
import pytest
class TestLocalRunContext(object):
  """Tests for LocalRunContext's run-dict bookkeeping."""
  @pytest.fixture
  def context(self):
    return LocalRunContext()
  @pytest.fixture
  def params(self):
    return {
      'x': 1.0,
      'y': 2.0
    }
  @pytest.fixture
  def metrics(self):
    return {
      'v0': 1,
      'v1': 2.0,
    }
  def test_init(self):
    name = 'test0'
    metadata = {'m0': 1, 'm2': 2.0}
    context = LocalRunContext(name=name, metadata=metadata)
    run = context.get()
    assert run['name'] == name
    assert run['metadata'] == metadata
  @pytest.mark.parametrize('state', ['completed', 'failed'])
  def test_log_state(self, context, state):
    context.log_state(state)
    run = context.get()
    assert run['state'] == state
  def test_log_failure(self, context):
    context.log_failure()
    run = context.get()
    assert run['state'] == 'failed'
  def test_log_metrics(self, context, metrics):
    # NOTE(review): this test can never fail as written -- log_metrics() is
    # called without the *metrics* fixture, and the comparison below is a
    # bare expression whose result is discarded (missing `assert`). The
    # intended test is presumably `context.log_metrics(metrics)` followed by
    # `assert run['values'] == metrics`; confirm LocalRunContext.log_metrics'
    # signature and storage format before fixing.
    context.log_metrics()
    run = context.get()
    run['values'] == metrics
  @pytest.mark.parametrize('source, source_sort, source_default_show', [
    ('s0', 10, True),
    ('s0', 20, False),
    ('s0', None, None),
    (None, 20, False),
  ])
  def test_log_parameters(self, context, params, source, source_sort, source_default_show):
    if source_sort is not None:
      source_meta = {'sort': source_sort, 'default_show': source_default_show}
    else:
      source_meta = None
    context.log_parameters(params, source, source_meta)
    run = context.get()
    assert run['assignments'] == params
    if source is not None:
      # With a source, each assignment is annotated; sources metadata is
      # present only when source_meta was supplied.
      assert run['assignments_meta'] == {p: {'source': source} for p in params}
      if source_sort is not None:
        assert run['assignments_sources'][source] == {'sort': source_sort, 'default_show': source_default_show}
      else:
        assert 'assignments_sources' not in run
    else:
      assert 'assignments_meta' not in run and 'assignments_sources' not in run
| mit | a62b1cb43590bfe4a86b20b8cd1567ad | 26.863014 | 111 | 0.631268 | 3.506897 | false | true | false | false |
sigopt/sigopt-python | sigopt/objects.py | 1 | 15075 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
import copy
import warnings
from .compat import json
from .lib import is_sequence, is_mapping, is_integer, is_number, is_numpy_array, is_string
class ListOf(object):
  """Callable that coerces every element of a sequence with the wrapped type.

  ``ListOf(T)(xs)`` returns ``[T(x) for x in xs]``.
  """

  def __init__(self, typ):
    self.type = typ

  def __call__(self, value):
    coerce = self.type
    return [coerce(element) for element in value]
class MapOf(object):
  """Callable that coerces both keys and values of a mapping.

  Keys are converted with ``key_type`` (default ``str``) and values with
  ``value_type``.
  """

  def __init__(self, value_type, key_type=str):
    self.value_type = value_type
    self.key_type = key_type

  def __call__(self, value):
    coerce_key = self.key_type
    coerce_value = self.value_type
    return {coerce_key(k): coerce_value(v) for k, v in value.items()}
def DictField(name, type=str):
  """Build a callable that pulls key ``name`` out of a mapping and coerces it with ``type``."""
  def extract(value):
    return type(value[name])
  return extract
Any = lambda x: x
class Field(object):
  """Declarative attribute coercer: applies the wrapped type, passing None through untouched."""

  def __init__(self, typ):
    self.type = typ

  def __call__(self, value):
    return None if value is None else self.type(value)
class DeprecatedField(Field):
  """Field variant that emits a DeprecationWarning every time it is read."""

  def __init__(self, typ, recommendation=None):
    super().__init__(typ)
    if recommendation:
      self.recommendation = ' ' + recommendation
    else:
      self.recommendation = ''

  def __call__(self, value):
    message = 'This field has been deprecated and may be removed in a future version.{0}'.format(
      self.recommendation,
    )
    warnings.warn(message, DeprecationWarning)
    return super().__call__(value)
class BaseApiObject(object):
  """Base for API objects.

  Class attributes that are Field instances act like descriptors: reading one
  coerces the matching entry of the JSON ``_body``; assigning/deleting one
  writes to / removes from ``_body``.
  """

  def __getattribute__(self, name):
    value = object.__getattribute__(self, name)
    # Field class attributes are resolved against the JSON body on access.
    if isinstance(value, Field):
      return value(self._body.get(name))
    return value

  def __setattr__(self, name, value):
    field = self._get_field(name)
    if field:
      # Field-backed attributes are serialized and stored in the JSON body.
      value = ApiObject.as_json(value)
      self._body[name] = value
    else:
      object.__setattr__(self, name, value)

  def __delattr__(self, name):
    field = self._get_field(name)
    if field:
      del self._body[name]
    else:
      object.__delattr__(self, name)

  def _get_field(self, name):
    # Return the Field declared under `name`, or None when the attribute is
    # absent or is not a Field.
    try:
      subvalue = object.__getattribute__(self, name)
    except AttributeError:
      return None
    else:
      return subvalue if isinstance(subvalue, Field) else None

  def _repr_keys(self):
    # Keys shown by __repr__: public, non-deprecated, non-callable attributes
    # that are actually present in the serialized body.
    attributes = dir(self)
    attributes = [a for a in attributes if not a.startswith('_')]
    attributes = [a for a in attributes if not isinstance(getattr(self.__class__, a), DeprecatedField)]
    attributes = [a for a in attributes if not callable(getattr(self, a))]
    keys_in_json = set(ApiObject.as_json(self._body).keys())
    return keys_in_json.intersection(set(attributes))

  @staticmethod
  def _emit_repr(object_name, values_mapping):
    # Render "Name(\n  key=value,\n ...)" using ApiObject.dumps for values.
    if values_mapping:
      return '{0}(\n{1}\n)'.format(
        object_name,
        '\n'.join([
          ' {}={},'.format(key, ApiObject.dumps(value, indent_level=2).lstrip())
          for key, value
          in values_mapping.items()
        ]),
      )
    return '{0}()'.format(object_name)

  def __repr__(self):
    keys = self._repr_keys()
    values = {key: getattr(self, key) for key in keys}
    return BaseApiObject._emit_repr(self.__class__.__name__, values)

  def to_json(self):
    # Deep-copy so callers cannot mutate the object's internal body.
    return copy.deepcopy(self._body)
class ApiObject(BaseApiObject):
  """Standard API object backed by a plain dict body, with JSON helpers."""

  def __init__(self, body, bound_endpoint=None, retrieve_params=None):
    super().__init__()
    # Use object.__setattr__ directly so these become real attributes instead
    # of being routed into the JSON body by BaseApiObject.__setattr__.
    object.__setattr__(self, '_body', body)
    object.__setattr__(self, '_bound_endpoint', bound_endpoint)
    object.__setattr__(self, '_retrieve_params', retrieve_params)

  def __eq__(self, other):
    return (
      isinstance(other, self.__class__) and
      self._body == other._body
    )

  @staticmethod
  def as_json(obj):
    # Recursively convert API objects, mappings, sequences, numpy arrays and
    # numeric types into plain JSON-serializable Python values.
    if isinstance(obj, BaseApiObject):
      return obj.to_json()
    if is_mapping(obj):
      c = {}
      for key in obj:
        c[key] = ApiObject.as_json(obj[key])
      return c
    if is_numpy_array(obj):
      return ApiObject.as_json(obj.tolist())
    if is_sequence(obj):
      return [ApiObject.as_json(c) for c in obj]
    if is_integer(obj):
      return int(obj)
    if is_number(obj):
      return float(obj)
    return obj

  @staticmethod
  def dumps(obj, indent_level=0):
    # Pretty-printer used by __repr__: renders nested mappings/sequences with
    # increasing indentation; strings are quoted, numbers normalized.
    indent = ' ' * indent_level
    if isinstance(obj, BaseApiObject):
      return '{0}{1}'.format(indent, str(obj).replace('\n', '\n{0}'.format(indent)))
    if is_mapping(obj):
      if obj:
        return '{0}{{\n{1},\n{0}}}'.format(
          indent,
          ',\n'.join([
            ' {0}"{1}"={2}'.format(
              indent,
              key,
              ApiObject.dumps(obj[key], indent_level=indent_level + 2).lstrip()
            )
            for key
            in obj
          ])
        )
      return '{0}{1}'.format(indent, str(obj))
    if is_numpy_array(obj):
      return ApiObject.dumps(obj.tolist(), indent_level=indent_level)
    if is_sequence(obj):
      if obj:
        return '{0}[\n{1},\n{0}]'.format(
          indent,
          ',\n'.join([
            ApiObject.dumps(c, indent_level=indent_level + 2)
            for c
            in obj
          ])
        )
      return '{0}{1}'.format(indent, str(obj))
    if is_integer(obj):
      return '{0}{1}'.format(indent, str(int(obj)))
    if is_number(obj):
      return '{0}{1}'.format(indent, str(float(obj)))
    if is_string(obj):
      return '{0}"{1}"'.format(indent, obj)
    return '{0}{1}'.format(indent, obj)
class _DictWrapper(BaseApiObject, dict):
  """API object whose JSON body is the dict itself (e.g. Assignments, Metadata)."""

  def __init__(self, body, bound_endpoint=None, retrieve_params=None):
    super().__init__()
    dict.__init__(self, body)
    self._bound_endpoint = bound_endpoint
    self._retrieve_params = retrieve_params

  @property
  def _body(self):
    # The wrapper itself serves as the underlying body.
    return self

  def to_json(self):
    return dict(copy.deepcopy(self))

  def copy(self):
    # Preserve the concrete subclass type when copying.
    return self.__class__(dict.copy(self))

  def __eq__(self, other):
    # Stricter than dict equality: the other side must be the same subclass.
    return (
      isinstance(other, self.__class__) and
      dict.__eq__(self, other)
    )

  def __repr__(self):
    return '{0}({1})'.format(
      self.__class__.__name__,
      json.dumps(
        ApiObject.as_json(self._body),
        indent=2,
        sort_keys=True,
        separators=(',', ': '),
      ),
    )
class Assignments(_DictWrapper):
pass
class Task(ApiObject):
cost = Field(float)
name = Field(str)
class Bounds(ApiObject):
max = Field(float)
min = Field(float)
class CategoricalValue(ApiObject):
enum_index = Field(int)
name = Field(str)
class Client(ApiObject):
created = Field(int)
id = Field(str)
name = Field(str)
organization = Field(str)
class Conditional(ApiObject):
name = Field(str)
values = Field(ListOf(str))
class Conditions(_DictWrapper):
pass
class ImportancesMap(_DictWrapper):
pass
class Importances(ApiObject):
importances = Field(ImportancesMap)
class MetricImportances(ApiObject):
importances = Field(ImportancesMap)
metric = Field(str)
class Metadata(_DictWrapper):
pass
class SysMetadata(_DictWrapper):
pass
class MetricEvaluation(ApiObject):
name = Field(str)
value = Field(float)
value_stddev = Field(float)
class Metric(ApiObject):
name = Field(str)
objective = Field(str)
strategy = Field(str)
threshold = Field(float)
class Observation(ApiObject):
assignments = Field(Assignments)
created = Field(int)
experiment = Field(str)
failed = Field(bool)
id = Field(str)
metadata = Field(Metadata)
suggestion = Field(str)
task = Field(Task)
value = Field(float)
value_stddev = Field(float)
values = Field(ListOf(MetricEvaluation))
class Organization(ApiObject):
created = Field(int)
deleted = Field(bool)
id = Field(str)
name = Field(str)
class Paging(ApiObject):
after = Field(str)
before = Field(str)
class Pagination(ApiObject):
  """One page of list-endpoint results plus the cursors needed to fetch more."""
  count = Field(int)
  paging = Field(Paging)

  def __init__(self, data_cls, body, bound_endpoint=None, retrieve_params=None):
    super().__init__(body, bound_endpoint, retrieve_params)
    # Class used to deserialize each element of the `data` list.
    self.data_cls = data_cls

  def _repr_keys(self):
    return ['data', 'count', 'paging']

  def __repr__(self):
    values = {
      'data': self._unsafe_data,
      'count': self.count,
      'paging': self.paging,
    }
    values = {k: v for k, v in values.items() if v is not None}
    return BaseApiObject._emit_repr('Pagination<{0}>'.format(self.data_cls.__name__), values)

  @property
  def data(self):
    # Deprecated accessor: a single page may be incomplete, so warn callers.
    warnings.warn(
      'The .data field only contains a single page of results, which may be incomplete for large responses.'
      ' Prefer to use the `.iterate_pages() to ensure that you iterate through all elements in the response.',
      RuntimeWarning,
    )
    return self._unsafe_data

  @property
  def _unsafe_data(self):
    # Deserialize the raw `data` list of the current page without warning.
    return Field(ListOf(self.data_cls))(self._body.get('data'))

  def iterate_pages(self):
    # Lazily walk all pages by re-invoking the bound endpoint with the next
    # `before`/`after` cursor until a page comes back empty.
    # pylint: disable=no-member
    data = self._unsafe_data
    paging = self.paging or Paging({})
    # Paginate backwards (`before`) unless the caller explicitly asked for `after`.
    use_before = 'before' in self._retrieve_params or 'after' not in self._retrieve_params
    while data:
      for d in data:
        yield d
      next_paging = dict(before=paging.before) if use_before else dict(after=paging.after)
      if next_paging.get('before') is not None or next_paging.get('after') is not None:
        # Rebuild the retrieve parameters with only the relevant cursor set.
        params = self._retrieve_params.copy()
        if use_before:
          params['before'] = paging.before
          params.pop('after', None)
        else:
          params.pop('before', None)
          params['after'] = paging.after
        response = self._bound_endpoint(**params)
        data = response._unsafe_data
        paging = response.paging
      else:
        # No cursor left: terminate the loop.
        data = []
        paging = None
    # pylint: enable=no-member
class ParameterPrior(ApiObject):
mean = Field(float)
name = Field(str)
scale = Field(float)
shape_a = Field(float)
shape_b = Field(float)
class Parameter(ApiObject):
bounds = Field(Bounds)
categorical_values = Field(ListOf(CategoricalValue))
conditions = Field(Conditions)
default_value = Field(Any)
grid = Field(ListOf(float))
name = Field(str)
precision = Field(int)
prior = Field(ParameterPrior)
transformation = Field(str)
tunable = DeprecatedField(bool)
type = Field(str)
class Progress(ApiObject):
# observation progress fields
best_observation = DeprecatedField(Observation, recommendation='Prefer the `best_assignments` endpoint')
first_observation = Field(Observation)
last_observation = Field(Observation)
observation_count = Field(int)
observation_budget_consumed = Field(float)
# run progress fields
active_run_count = Field(int)
finished_run_count = Field(int)
total_run_count = Field(int)
remaining_budget = Field(float)
class RunsProgress(ApiObject):
active_run_count = Field(int)
finished_run_count = Field(int)
total_run_count = Field(int)
remaining_budget = Field(float)
class Suggestion(ApiObject):
assignments = Field(Assignments)
created = Field(int)
experiment = Field(str)
id = Field(str)
metadata = Field(Metadata)
state = Field(str)
task = Field(Task)
class QueuedSuggestion(ApiObject):
assignments = Field(Assignments)
created = Field(int)
experiment = Field(str)
id = Field(str)
task = Field(Task)
class ConstraintTerm(ApiObject):
name = Field(str)
weight = Field(float)
class LinearConstraint(ApiObject):
terms = Field(ListOf(ConstraintTerm))
threshold = Field(float)
type = Field(str)
class TrainingEarlyStoppingCriteria(ApiObject):
lookback_checkpoints = Field(int)
name = Field(str)
metric = Field(str)
min_checkpoints = Field(int)
type = Field(str)
class TrainingMonitor(ApiObject):
max_checkpoints = Field(int)
early_stopping_criteria = Field(ListOf(TrainingEarlyStoppingCriteria))
class Experiment(ApiObject):
budget = Field(float)
can_be_deleted = DeprecatedField(bool)
client = Field(str)
conditionals = Field(ListOf(Conditional))
created = Field(int)
development = Field(bool)
id = Field(str)
linear_constraints = Field(ListOf(LinearConstraint))
metadata = Field(Metadata)
metric = DeprecatedField(
Metric,
recommendation='Prefer the `metrics` field (see https://sigopt.com/docs/objects/experiment)'
)
metrics = Field(ListOf(Metric))
name = Field(str)
num_solutions = Field(int)
observation_budget = Field(int)
parameters = Field(ListOf(Parameter))
parallel_bandwidth = Field(int)
progress = Field(Progress)
project = Field(str)
state = Field(str)
tasks = Field(ListOf(Task))
training_monitor = Field(TrainingMonitor)
type = Field(str)
updated = Field(int)
user = Field(str)
class AIExperiment(ApiObject):
budget = Field(float)
client = Field(str)
conditionals = Field(ListOf(Conditional))
created = Field(int)
id = Field(str)
linear_constraints = Field(ListOf(LinearConstraint))
metadata = Field(Metadata)
metrics = Field(ListOf(Metric))
name = Field(str)
num_solutions = Field(int)
parallel_bandwidth = Field(int)
parameters = Field(ListOf(Parameter))
progress = Field(RunsProgress)
project = Field(str)
state = Field(str)
updated = Field(int)
user = Field(str)
class Token(ApiObject):
all_experiments = Field(bool)
client = Field(str)
development = Field(bool)
experiment = Field(str)
expires = Field(int)
permissions = DeprecatedField(str)
token = Field(str)
token_type = Field(str)
user = Field(str)
class BestAssignments(ApiObject):
assignments = Field(Assignments)
id = Field(str)
value = Field(float)
value_stddev = Field(float)
values = Field(ListOf(MetricEvaluation))
class StoppingCriteria(ApiObject):
should_stop = Field(bool)
reasons = Field(ListOf(str))
class Project(ApiObject):
id = Field(str)
client = Field(str)
name = Field(str)
user = Field(str)
created = Field(int)
updated = Field(int)
metadata = Field(Metadata)
class Model(ApiObject):
type = Field(str)
class SourceCode(ApiObject):
content = Field(str)
hash = Field(str)
class TrainingRun(ApiObject):
assignments = Field(Assignments)
best_checkpoint = Field(str)
client = Field(str)
checkpoint_count = Field(int)
completed = Field(int)
created = Field(int)
datasets = Field(ListOf(str))
deleted = Field(bool)
experiment = Field(str)
files = Field(ListOf(str))
finished = Field(bool)
id = Field(str)
logs = Field(MapOf(DictField('content')))
metadata = Field(Metadata)
model = Field(Model)
name = Field(str)
object = Field(str)
observation = Field(str)
project = Field(str)
source_code = Field(SourceCode)
state = Field(str)
suggestion = Field(str)
tags = Field(ListOf(str))
updated = Field(int)
user = Field(str)
values = Field(MapOf(MetricEvaluation))
sys_metadata = Field(SysMetadata)
dev_metadata = Field(Metadata)
class StoppingReasons(_DictWrapper):
pass
class Checkpoint(ApiObject):
id = Field(str)
created = Field(int)
metadata = Field(Metadata)
should_stop = Field(bool)
stopping_reasons = Field(StoppingReasons)
training_run = Field(str)
values = Field(ListOf(MetricEvaluation))
class User(ApiObject):
created = Field(int)
deleted = Field(bool)
email = Field(str)
id = Field(str)
name = Field(str)
class Session(ApiObject):
api_token = Field(Token)
client = Field(Client)
user = Field(User)
| mit | f334f97acc7f373ecf14ba07dfcf90af | 23.510569 | 110 | 0.649728 | 3.516212 | false | false | false | false |
sigopt/sigopt-python | sigopt/orchestrate/controller.py | 1 | 18484 | # Copyright © 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import re
from collections import defaultdict
import click
import pint
import yaml
from botocore.exceptions import NoRegionError
from sigopt.paths import get_bin_dir, ensure_dir
from sigopt.utils import accept_sigopt_not_found
from .cluster.errors import AlreadyConnectedException, ClusterError, MultipleClustersConnectionError, NotConnectedError
from .docker.service import DockerException, DockerService
from .exceptions import CheckExecutableError, ModelPackingError, OrchestrateException
from .identifier import IDENTIFIER_TYPE_EXPERIMENT, IDENTIFIER_TYPE_RUN, parse_identifier
from .kubernetes.service import ORCHESTRATE_NAMESPACE, CleanupFailedException
from .paths import (
check_iam_authenticator_executable,
check_kubectl_executable,
download_iam_authenticator_executable,
download_kubectl_executable,
)
from .provider.constants import PROVIDER_TO_STRING, Provider
from .services.orchestrate_bag import OrchestrateServiceBag
from .status import print_status
from .stop import stop_experiment, stop_run
class _ExitException(click.ClickException):
  """ClickException that carries a configurable process exit code."""

  def __init__(self, msg, exit_code=1):
    super().__init__(msg)
    # click reads `exit_code` when the exception terminates the CLI.
    self.exit_code = exit_code
def docker_login(cluster, docker_service, repository_name):
  """Log the Docker service into the registry for `repository_name`, if needed."""
  creds = cluster.get_registry_login_credentials(repository_name)
  # None indicates no login is required for this registry.
  if creds is not None:
    docker_service.login(creds)
class OrchestrateController:
def __init__(self, services):
self.services = services
  @classmethod
  def create(cls):
    """Build a controller with an initialized service bag.

    Translates a missing AWS region configuration into a friendly CLI error.
    """
    try:
      services = OrchestrateServiceBag()
    except NoRegionError as e:
      raise _ExitException("No default region is selected, please run `aws configure`") from e
    return cls(services)
  def clean_images(self):
    """Prune unused Docker images; requires an active cluster connection."""
    self.services.cluster_service.assert_is_connected()
    docker_service = DockerService.create(self.services)
    docker_service.prune()
def build_and_push_image(
self,
cluster,
docker_service,
dockerfile,
run_options,
quiet,
):
image_name = run_options.get('image')
repository_name, tag = DockerService.get_repository_and_tag(image_name)
docker_login(cluster, docker_service, repository_name)
build_image = run_options.get('build_image', True)
if build_image:
if not quiet:
print('Containerizing and uploading your model, this may take a few minutes...')
try:
image_tag = self.services.model_packer_service.build_image(
docker_service=docker_service,
repository=repository_name,
tag=tag,
quiet=quiet,
dockerfile=dockerfile,
)
image = docker_service.get_image(image_tag)
except ModelPackingError as mpe:
msg = str(mpe)
match = re.search('manifest for (.*?) not found: manifest unknown: manifest unknown', msg)
if match is not None:
msg = f'Unable to find base image {match.groups()[0]} when building your docker container'
raise _ExitException(msg) from mpe
repository_name = cluster.generate_image_tag(repository_name)
repository_image_tag = DockerService.format_image_name(repository_name, tag)
if build_image:
image.tag(repository=repository_name, tag=tag)
if not quiet:
print(f"Uploading the model environment to image registry: {repository_image_tag}")
docker_service.push(repository_name, tag=tag, quiet=quiet)
return repository_name, tag
def runner(
self,
run_options,
command,
cluster,
docker_service,
dockerfile,
project_id,
quiet=False,
optimize=True,
optimization_options=None,
):
if optimize:
if not optimization_options:
raise OrchestrateException('optimize jobs require an experiment yaml file')
repository_name, tag = self.build_and_push_image(
cluster=cluster,
docker_service=docker_service,
dockerfile=dockerfile,
run_options=run_options,
quiet=quiet,
)
resource_options = self.services.gpu_options_validator_service.get_resource_options(run_options)
run_command = command
job_type_str = 'experiment' if optimize else 'run'
if not quiet:
print('Starting your {}'.format(job_type_str))
if optimize:
return self.services.job_runner_service.start_cluster_experiment(
repository=repository_name,
tag=tag,
resource_options=resource_options,
optimization_options=optimization_options,
run_command=run_command,
project_id=project_id,
)
return self.services.job_runner_service.start_cluster_run(
repository=repository_name,
tag=tag,
resource_options=resource_options,
run_command=run_command,
project_id=project_id,
)
def run_on_cluster(self, command, run_options, silent, dockerfile, project_id):
cluster = self.services.cluster_service.test()
quiet = silent
docker_service = DockerService.create(self.services)
identifier = self.runner(
cluster=cluster,
docker_service=docker_service,
quiet=quiet,
optimize=False,
command=command,
run_options=run_options,
dockerfile=dockerfile,
project_id=project_id,
)
if quiet:
print(identifier)
else:
print(f'Started "{identifier}"')
def test_run_on_cluster(self, command, run_options, dockerfile, project_id):
cluster = self.services.cluster_service.test()
docker_service = DockerService.create(self.services)
identifier = self.runner(
cluster=cluster,
docker_service=docker_service,
quiet=False,
optimize=False,
run_options=run_options,
dockerfile=dockerfile,
command=command,
project_id=project_id,
)
run_identifier = parse_identifier(identifier)
label_selector = run_identifier["pod_label_selector"]
print(f"View your run at https://app.sigopt.com/{identifier}")
print("waiting for controller to start...")
def check_pod_condition(event):
if event["type"] == "DELETED":
raise Exception("The pod was deleted")
pod = event["object"]
for condition in pod.status.conditions or []:
if condition.type in ("Ready", "PodScheduled") and condition.status == "False":
print(f"Pod '{pod.metadata.name}' in bad condition: {condition.reason}: {condition.message}")
if condition.reason == "Unschedulable":
print(
"Hint: If you configured your nodes with sufficient resources"
" then you probably just need to wait for the cluster to scale up"
)
for container_status in pod.status.container_statuses or []:
waiting_state = container_status.state.waiting
if waiting_state:
print(
f"Container '{container_status.name}' in pod '{pod.metadata.name}' is waiting:"
f" {waiting_state.reason}: {waiting_state.message}"
)
self.services.kubernetes_service.wait_for_pod_to_start(
label_selector=run_identifier["controller_label_selector"],
event_handler=check_pod_condition,
)
print("controller started, waiting for run to be created...")
self.services.kubernetes_service.wait_for_pod_to_exist(label_selector=label_selector)
print("run created, waiting for it to start...")
pod = self.services.kubernetes_service.wait_for_pod_to_start(
label_selector=label_selector,
event_handler=check_pod_condition,
)
print("run started, following logs")
try:
print("*** START RUN LOGS ***")
for log_line in self.services.kubernetes_service.logs(pod.metadata.name, follow=True):
print(log_line)
print("*** END RUN LOGS ***")
except KeyboardInterrupt:
print()
print("Cleaning up")
stop_run(run_identifier, self.services)
  def stop_by_identifier(self, identifier):
    """Stop the run or experiment named by a parsed identifier dict.

    A SigOpt not-found error is reported to the user instead of raised.
    """
    identifier_type = identifier["type"]
    with accept_sigopt_not_found() as wrap:
      if identifier_type == IDENTIFIER_TYPE_RUN:
        stop_run(identifier, self.services)
      elif identifier_type == IDENTIFIER_TYPE_EXPERIMENT:
        stop_experiment(identifier, self.services)
      else:
        raise NotImplementedError(f"Cannot stop {identifier['raw']}")
    # wrap.exception holds the swallowed not-found error, if any.
    if wrap.exception:
      print(f"{identifier['raw']}: {str(wrap.exception)}")
    else:
      print(f"{identifier['raw']}: deleted")
def optimize_on_cluster(self, command, run_options, optimization_options, silent, dockerfile, project_id):
cluster = self.services.cluster_service.test()
quiet = silent
docker_service = DockerService.create(self.services)
identifier = self.runner(
cluster=cluster,
docker_service=docker_service,
quiet=quiet,
optimize=True,
command=command,
run_options=run_options,
optimization_options=optimization_options,
dockerfile=dockerfile,
project_id=project_id,
)
if quiet:
print(identifier)
else:
print(f'Started "{identifier}"')
def create_cluster(self, options):
print('Creating your cluster, this process may take 20-30 minutes or longer...')
# NOTE(dan): checks again now that we know provider, in case aws iam authenticator is needed
check_authenticator_binary(provider=options.get('provider'))
try:
cluster_name = self.services.cluster_service.create(options=options)
except ClusterError as pde:
raise _ExitException(str(pde)) from pde
print(f'Successfully created kubernetes cluster: {cluster_name}')
def update_cluster(self, options):
print('Updating your cluster, this process may take 5-10 minutes or longer...')
# NOTE(dan): checks again now that we know provider, in case aws iam authenticator is needed
check_authenticator_binary(provider=options.get('provider'))
cluster_name = self.services.cluster_service.update(options=options)
print(f'Successfully updated kubernetes cluster: {cluster_name}')
def destroy_connected_cluster(self):
cluster = self.services.cluster_service.get_connected_cluster()
print(f'Destroying cluster {cluster.name}, this process may take 20-30 minutes or longer...')
try:
self.services.kubernetes_service.cleanup_for_destroy()
except CleanupFailedException as cfe:
raise _ExitException(str(cfe)) from cfe
self.services.cluster_service.destroy(
cluster_name=cluster.name,
provider_string=cluster.provider_string,
)
print(f'Successfully destroyed kubernetes cluster: {cluster.name}')
def connect_to_cluster(self, cluster_name, provider_string, registry, kubeconfig):
check_authenticator_binary(provider=provider_string)
print(f'Connecting to cluster {cluster_name}...')
try:
self.services.cluster_service.connect(
cluster_name=cluster_name,
provider_string=provider_string,
kubeconfig=kubeconfig,
registry=registry,
)
print(f'Successfully connected to kubernetes cluster: {cluster_name}')
except AlreadyConnectedException as ace:
raise _ExitException(
f'Already connected to cluster: {ace.current_cluster_name}',
) from ace
def disconnect_from_connected_cluster(self):
cluster = self.services.cluster_service.get_connected_cluster()
print(f'Disconnecting from cluster {cluster.name}...')
try:
self.services.cluster_service.disconnect(cluster.name, disconnect_all=False)
except NotConnectedError:
self.services.logging_service.warning('Not connected to any clusters')
except MultipleClustersConnectionError as mcce:
cluster_names = ", ".join(mcce.connected_clusters)
self.services.logging_service.warning(
f'Connected to multiple clusters: {cluster_names}. '
'Rerun with `disconnect --all`.'
)
except ClusterError as ce:
raise _ExitException(str(ce)) from ce
def test_cluster_connection(self):
print('Testing if you are connected to a cluster, this may take a moment...')
try:
cluster = self.services.cluster_service.test()
except NotConnectedError as nce:
raise _ExitException(
'You are not currently connected to a cluster.',
) from nce
registry_str = cluster.registry if cluster.registry is not None else 'default'
print(
'\nYou are connected to a cluster! Here is the info:'
f'\n\tcluster name: {cluster.name}'
f'\n\tprovider: {cluster.provider_string}'
f'\n\tregistry: {registry_str}'
)
try:
docker_service = DockerService.create(self.services)
docker_service.check_connection()
except DockerException as e:
raise _ExitException(str(e)) from e
def cluster_status(self):
try:
cluster = self.services.cluster_service.test()
except NotConnectedError as nce:
raise _ExitException(
'You are not currently connected to a cluster',
) from nce
print(f"You are currently connected to the cluster: {cluster.name}")
all_pods = self.services.kubernetes_service.get_pods()
nodes = self.services.kubernetes_service.get_nodes()
individual_pods = []
experiment_pods = defaultdict(list)
def group_by_phase(pods):
pods_by_phase = defaultdict(list)
for pod in pods:
pods_by_phase[pod.status.phase].append(pod)
return pods_by_phase
collapse_phases = ["Succeeded"]
def print_pods(all_pods, indent):
by_phase = group_by_phase(all_pods)
tabs = "\t" * indent
for phase, pods in by_phase.items():
print(f"{tabs}{phase}: {len(pods)} runs")
if phase not in collapse_phases:
for p in pods:
print(f"{tabs}\trun/{p.metadata.labels['run']}\t{p.metadata.name}")
for pod in all_pods.items:
if pod.metadata.labels["type"] == "run":
try:
experiment_pods[pod.metadata.labels["experiment"]].append(pod)
except KeyError:
individual_pods.append(pod)
if individual_pods:
print(f"One-off: {len(individual_pods)} runs")
print_pods(individual_pods, 1)
if experiment_pods:
print(f"Experiments: {len(experiment_pods)} total")
for eid, exp_pods in sorted(experiment_pods.items(), key=lambda x: x[0]):
print(f"\texperiment/{eid}: {len(exp_pods)} runs")
print_pods(exp_pods, 2)
print(f"Nodes: {len(nodes.items)} total")
running_pods_by_node = defaultdict(list)
for pod in all_pods.items:
if pod.status.phase == "Running":
running_pods_by_node[pod.spec.node_name].append(pod)
CPU = "cpu"
MEMORY = "memory"
GPU = "nvidia.com/gpu"
RESOURCE_META = ((CPU, "CPU"), (MEMORY, "B"), (GPU, "GPU"))
unit_registry = pint.UnitRegistry()
# NOTE(taylor): creates a new unit "CPU". "mCPU = milli CPU = 0.001 * CPU"
unit_registry.define("CPU = [cpu]")
unit_registry.define("GPU = [gpu]")
for node in nodes.items:
print(f"\t{node.metadata.name}:")
node_resources = [
(c.resources.requests, c.resources.limits)
for p in running_pods_by_node[node.metadata.name]
for c in p.spec.containers
]
# NOTE(taylor): create an inital value for each resource type for requests and limits
all_totals = tuple(
{
resource_type: 0 * unit_registry(ext)
for resource_type, ext in RESOURCE_META
}
for _ in range(2)
)
for resources in node_resources:
for resource_allocation, totals in zip(resources, all_totals):
if not resource_allocation:
continue
for resource_type, ext in RESOURCE_META:
# NOTE(taylor): this parses the resource quantity with a magnitude and unit.
# ex. "12Mi" + "B" == "12*2^20 bytes", "100m" + "CPU" == "0.1 CPU"
totals[resource_type] += unit_registry.Quantity(resource_allocation.get(resource_type, "0") + ext)
requests_totals, limits_totals = all_totals
for resource_type, ext in RESOURCE_META:
allocatable = unit_registry.Quantity(node.status.allocatable.get(resource_type, "0") + ext)
if not allocatable:
continue
print(f"\t\t{resource_type}:")
total_request = requests_totals[resource_type]
percent_request = (100 * total_request / allocatable).to_reduced_units()
total_limit = limits_totals[resource_type]
percent_limit = (100 * total_limit / allocatable).to_reduced_units()
allocatable, total_request, total_limit = (
value.to_compact()
for value in (allocatable, total_request, total_limit)
)
print(f"\t\t\tAllocatable: {allocatable:~.2f}")
print(f"\t\t\tRequests: {total_request:~.2f}, {percent_request:~.2f} %")
print(f"\t\t\tLimits: {total_limit:~.2f}, {percent_limit:~.2f} %")
def print_status(self, identifier):
print(f"{identifier['raw']}:")
with accept_sigopt_not_found() as wrap:
for line in print_status(identifier, self.services):
print(f"\t{line}")
if wrap.exception:
print(f"\t{str(wrap.exception)}")
def install_cluster_plugins(self):
cluster = self.services.cluster_service.get_connected_cluster()
print("Installing required kubernetes resources...")
self.services.kubernetes_service.ensure_plugins(cluster.name, cluster.provider)
print("Uploading required images to your registry...")
print("Finished installing plugins")
def exec_kubectl(self, arguments):
self.services.cluster_service.assert_is_connected()
check_binary(kubectl_check)
cmd = self.services.kubectl_service.kubectl_command
args = [cmd, '--namespace', ORCHESTRATE_NAMESPACE, *arguments]
os.execvpe(
cmd,
args,
self.services.kubectl_service.kubectl_env(),
)
kubectl_check = (check_kubectl_executable, download_kubectl_executable, 'kubernetes')
aws_iam_authenticator_check = (
check_iam_authenticator_executable,
download_iam_authenticator_executable,
'aws iam-authentication',
)
def check_authenticator_binary(provider):
  """Ensure the aws-iam-authenticator binary exists when targeting AWS."""
  # Only AWS clusters require the IAM authenticator helper.
  if provider == PROVIDER_TO_STRING[Provider.AWS]:
    check_binary(aws_iam_authenticator_check)
def check_binary(options):
  """Ensure an executable is present, downloading it on first use.

  `options` is a (check, download, name) tuple: `check` raises
  CheckExecutableError when the binary is missing or invalid.
  """
  ensure_dir(get_bin_dir())
  check, download, name = options
  try:
    check()
  except CheckExecutableError:
    print(f"Downloading {name} executable, this could take some time...")
    download()
    # Re-run the full validation now that the binary has been fetched.
    check(full_check=True)
def load_user_options(filename):
  """Parse the YAML options file at `filename`; an empty file yields {}."""
  with open(filename) as options_file:
    parsed = yaml.safe_load(options_file)
  return parsed or {}
| mit | b7e9d4df5d2943b81cf903760031e2fd | 34.959144 | 119 | 0.675215 | 3.82117 | false | false | false | false |
choderalab/openmmtools | openmmtools/multistate/utils.py | 1 | 11858 | #!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
Multistate Utilities
====================
Sampling Utilities for the YANK Multistate Package. A collection of functions and small classes
which are common to help the samplers and analyzers and other public hooks.
COPYRIGHT
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
This code is licensed under the latest available version of the MIT License.
"""
import logging
import warnings
import numpy as np
from pymbar import timeseries # for statistical inefficiency analysis
logger = logging.getLogger(__name__)
__all__ = [
'generate_phase_name',
'get_decorrelation_time',
'get_equilibration_data',
'get_equilibration_data_per_sample',
'remove_unequilibrated_data',
'subsample_data_along_axis',
'SimulationNaNError'
]
# =============================================================================================
# Sampling Exceptions
# =============================================================================================
class SimulationNaNError(Exception):
    """Raised when a simulation diverges to NaN."""
    pass
# =============================================================================================
# MODULE FUNCTIONS
# =============================================================================================
def generate_phase_name(current_name, name_list):
    """Generate a unique, human-readable name derived from a base name.

    A numeric suffix is appended to the base name until a string that does
    not appear in ``name_list`` is produced.

    Parameters
    ----------
    current_name : str or None
        The base name to make unique. When None, names of the form
        ``'phase0'``, ``'phase1'``, ... are tried. When the name is not
        already in ``name_list``, it is returned unmodified.
    name_list : iterable of str
        Existing names that the generated name must not collide with.

    Returns
    -------
    name : str
        A unique string derived from ``current_name`` that is not in
        ``name_list``.
    """
    if current_name is None:
        def make_name(index):
            return 'phase{}'.format(index)
    elif current_name not in name_list:
        # The base name is already unique; use it as-is.
        return current_name
    else:
        def make_name(index):
            return current_name + str(index)

    suffix = 0
    while True:
        candidate = make_name(suffix)
        if candidate not in name_list:
            return candidate
        suffix += 1
def get_decorrelation_time(timeseries_to_analyze):
    """Return the decorrelation time (statistical inefficiency) of a timeseries.

    This is a thin wrapper; see ``pymbar.timeseries.statisticalInefficiency``
    for full documentation.
    """
    statistical_inefficiency = timeseries.statisticalInefficiency(timeseries_to_analyze)
    return statistical_inefficiency
def get_equilibration_data_per_sample(timeseries_to_analyze, fast=True, max_subset=100):
    """
    Compute the correlation time and n_effective per sample with tuning to how you want your data formatted

    This is a modified pass-through of ``pymbar.timeseries.detectEquilibration``, returning the per sample data.
    It has been modified to specify the maximum number of time points to consider, evenly spaced over the timeseries.
    This is different than saying "I want analysis done every X for total points Y = len(timeseries)/X",
    this is "I want Y total analysis points"

    Note that the returned arrays will be of size max_subset - 1, because we always discard data from the first time
    origin due to equilibration.

    See the ``pymbar.timeseries.detectEquilibration`` function for full algorithm documentation

    Parameters
    ----------
    timeseries_to_analyze : np.ndarray
        1-D timeseries to analyze for equilibration
    max_subset : int >= 1 or None, optional, default: 100
        Maximum number of points in the ``timeseries_to_analyze`` on which to analyze the equilibration on.
        These are distributed uniformly over the timeseries so the final output (after discarding the first point
        due to equilibration) will be size max_subset - 1 where indices are placed approximately every
        ``(len(timeseries_to_analyze) - 1) / max_subset``.
        The full timeseries is used if the timeseries is smaller than ``max_subset`` or if ``max_subset`` is None
    fast : bool, optional. Default: True
        If True, will use faster (but less accurate) method to estimate correlation time
        passed on to timeseries module.

    Returns
    -------
    i_t : np.ndarray of int
        Indices of the timeseries which were sampled from
    g_i : np.ndarray of float
        Estimated statistical inefficiency at t in units of index count.
        Equal to 1 + 2 tau, where tau is the correlation time
        Will always be >= 1
        e.g. If g_i[x] = 4.3, then choosing x as your equilibration point means that every ``ceil(4.3)`` indices in
        ``timeseries_to_analyze`` will be decorrelated, so the fully equilibrated decorrelated timeseries would be
        indexed by [x, x+5, x+10, ..., X) where X is the final point in the ``timeseries_to_analyze``.
        The "index count" in this case is the by count of the ``timeseries_to_analyze`` indices, NOT the ``i_t``
    n_effective_i : np.ndarray of float
        Number of effective samples by subsampling every ``g_i`` from index t, does include fractional value, so true
        number of points will be the floor of this output.
        The "index count" in this case is the by count of the ``timeseries_to_analyze`` indices, NOT the ``i_t``
    """
    # Cast to array if not already.
    series = np.array(timeseries_to_analyze)
    time_size = series.size
    set_size = time_size - 1  # Cannot analyze the last entry.
    # Cap the number of analysis points at the usable series length.
    if max_subset is None or set_size < max_subset:
        max_subset = set_size
    # Special trap for series of size 1.
    if max_subset == 0:
        max_subset = 1
    # Special trap for constant or size-1 series: the statistical inefficiency
    # is undefined, so report fully decorrelated data (g == 1 everywhere).
    if series.std() == 0.0 or max_subset == 1:
        return (np.arange(max_subset, dtype=int),               # i_t
                np.array([1] * max_subset),                     # g_i
                np.arange(time_size, time_size - max_subset, -1)  # n_effective_i
                )
    g_i = np.ones([max_subset], np.float32)
    n_effective_i = np.ones([max_subset], np.float32)
    counter = np.arange(max_subset)
    # Candidate equilibration time origins, evenly spaced over the series.
    i_t = np.floor(counter * time_size / max_subset).astype(int)
    for i, t in enumerate(i_t):
        try:
            g_i[i] = timeseries.statisticalInefficiency(series[t:], fast=fast)
        except Exception:
            # pymbar raises when the tail series[t:] is too short or constant;
            # fall back to the most conservative estimate (fully correlated tail).
            # Note: a bare ``except:`` here would also swallow KeyboardInterrupt.
            g_i[i] = (time_size - t + 1)
        n_effective_i[i] = (time_size - t + 1) / g_i[i]
    # We should never choose data from the first time origin as the equilibrated data because
    # it contains snapshots warming up from minimization, which causes problems with correlation time detection
    # By default (max_subset=100), the first 1% of the data is discarded. If 1% is not ideal, user can specify
    # max_subset to change the percentage (e.g. if 0.5% is desired, specify max_subset=200).
    return i_t[1:], g_i[1:], n_effective_i[1:]
def get_equilibration_data(timeseries_to_analyze, fast=True, max_subset=1000):
    """Determine the equilibration point of a timeseries.

    .. deprecated::
        Use the more general :func:`get_equilibration_data_per_sample` instead.

    See the ``pymbar.timeseries.detectEquilibration`` function for full documentation.

    Parameters
    ----------
    timeseries_to_analyze : np.ndarray
        1-D timeseries to analyze for equilibration.
    max_subset : int or None, optional, default: 1000
        Maximum number of points in ``timeseries_to_analyze`` on which the
        equilibration analysis is run. These are distributed uniformly over
        the timeseries. The full timeseries is used when it is smaller than
        ``max_subset`` or when ``max_subset`` is None.
    fast : bool, optional. Default: True
        If True, use a faster (but less accurate) method to estimate the
        correlation time; passed on to the timeseries module.

    Returns
    -------
    n_equilibration : int
        Iteration at which the system becomes equilibrated, chosen as the
        point that preserves the most decorrelated samples.
    g_t : float
        Number of indices between each decorrelated sample.
    n_effective_max : float
        Maximum number of decorrelated samples preserved.

    See Also
    --------
    get_equilibration_data_per_sample
    """
    warnings.warn("This function will be removed in future versions of YANK due to redundancy, "
                  "Please use the more general `get_equilibration_data_per_sample` function instead.")
    i_t, g_i, n_effective_i = get_equilibration_data_per_sample(
        timeseries_to_analyze, fast=fast, max_subset=max_subset)
    # The equilibration point is the time origin preserving the most samples.
    best = n_effective_i.argmax()
    return i_t[best], g_i[best], n_effective_i[best]
def remove_unequilibrated_data(data, number_equilibrated, axis):
    """Discard the first ``number_equilibrated`` entries of ``data`` along ``axis``.

    Parameters
    ----------
    data : np.array-like of any dimension length
        The data to trim.
    number_equilibrated : int
        Number of leading indices to remove along the given axis; the axis
        is shortened by this amount.
    axis : int
        Axis along which samples are removed. Negative indexing is supported.

    Returns
    -------
    equilibrated_data : ndarray
        ``data`` with the first ``number_equilibrated`` entries removed
        along ``axis``.
    """
    array_data = np.asarray(data)
    # Normalize negative axes, then build a slicer that trims only that axis.
    trim_axis = axis % array_data.ndim
    trim = slice(number_equilibrated, None)
    slicer = tuple(trim if dimension == trim_axis else slice(None)
                   for dimension in range(array_data.ndim))
    return array_data[slicer]
def subsample_data_along_axis(data, subsample_rate, axis):
    """Return a decorrelated subsample of ``data`` along a single axis.

    A sample is considered decorrelated every ``ceil(subsample_rate)``
    indices along ``data`` on the specified axis.

    Parameters
    ----------
    data : np.array-like of any dimension length
        The data to subsample.
    subsample_rate : float or int
        Rate at which to draw samples.
    axis : int
        Axis along which to apply the subsampling.

    Returns
    -------
    subsampled_data : ndarray of same number of dimensions as data
        The data subsampled along the given axis.
    """
    # TODO: find a name for the function that clarifies that decorrelation
    # TODO: is determined exclusively by subsample_rate?
    cast_data = np.asarray(data)
    # The statistical inefficiency g is already known, so any placeholder
    # series of the right length can be fed to the subsampling routine.
    axis_length = cast_data.shape[axis]
    decorrelated_indices = timeseries.subsampleCorrelatedData(np.zeros(axis_length),
                                                              g=subsample_rate)
    return np.take(cast_data, decorrelated_indices, axis=axis)
| mit | a0b800f536932751e6600ce6397c8971 | 39.609589 | 120 | 0.651543 | 4.030591 | false | false | false | false |
choderalab/openmmtools | openmmtools/data/cb7-viologen/molecules/create-viologen.py | 1 | 2044 | """
Create mol2 file for substituted viologen.
"""
from openeye.oechem import *
from openeye.oeomega import *
from openeye.oequacpac import *
smiles = "OC(=O)CCCCC[n+](cc1)ccc1-c2cc[n+](cc2)CCCCCC(=O)O" # substituted viologen
# Tripos mol2 file written at the end of the script.
output_filename = 'viologen.tripos.mol2'
def assign_am1bcc_charges(mol):
    """
    Assign canonical AM1BCC charges.

    A conformer ensemble is generated with Omega before the charges are
    assigned, and the first conformer is returned as the charged molecule.

    Parameters
    ----------
    mol : OEMol
        The molecule to assign charges for.

    Returns
    -------
    charged_mol : OEMol
        The first conformer of the charged molecule.

    .. note:: If conformer generation fails, a warning is emitted and the
        function implicitly returns ``None``.
    """
    omega = OEOmega()
    # Keep the input conformation in the generated ensemble.
    omega.SetIncludeInput(True)
    omega.SetCanonOrder(False)
    omega.SetSampleHydrogens(True)
    eWindow = 15.0
    omega.SetEnergyWindow(eWindow)
    omega.SetMaxConfs(800)
    omega.SetRMSThreshold(1.0)
    if omega(mol):
        OEAssignPartialCharges(mol, OECharges_AM1BCCSym)
        # Use the first conformer as the representative charged molecule.
        charged_mol = mol.GetConf(OEHasConfIdx(0))
        absFCharge = 0
        sumFCharge = 0
        sumPCharge = 0.0
        for atm in mol.GetAtoms():
            sumFCharge += atm.GetFormalCharge()
            absFCharge += abs(atm.GetFormalCharge())
            sumPCharge += atm.GetPartialCharge()
        # Report formal/partial charge totals as a sanity check.
        OEThrow.Info("%s: %d formal charges give total charge %d ; Sum of Partial Charges %5.4f"
                     % (mol.GetTitle(), absFCharge, sumFCharge, sumPCharge))
        return charged_mol
    else:
        OEThrow.Warning("Failed to generate conformation(s) for molecule %s" % mol.GetTitle())
##
# MAIN
##
# Create molecule from the SMILES string defined at the top of the script.
mol = OEMol()
OESmilesToMol(mol, smiles)
# Generate a single 3D conformation.
print("Generating conformation...")
omega = OEOmega()
omega.SetMaxConfs(1)
omega(mol)
# Assign aromaticity.
OEAssignAromaticFlags(mol, OEAroModelOpenEye)
# Add explicit hydrogens.
OEAddExplicitHydrogens(mol)
# Set title
mol.SetTitle('protonated viologen')
# Assign charges.
print("Assigning canonical AM1-BCC charges...")
# NOTE(review): assign_am1bcc_charges returns None when conformer generation
# fails; OEWriteMolecule below would then fail — confirm this is acceptable.
charged_mol = assign_am1bcc_charges(mol)
# Write the charged conformation to the mol2 output file.
ofs = oechem.oemolostream()
ofs.open(output_filename)
oechem.OEWriteMolecule(ofs, charged_mol)
print("Done.")
| mit | 7b488d5298068d31306980a635ad1630 | 21.711111 | 96 | 0.672701 | 2.979592 | false | false | false | false |
choderalab/openmmtools | openmmtools/data/benzene-toluene-implicit/generate-molecules.py | 1 | 1393 | #!/usr/bin/env python
"""
Generate molecules for test system using OpenEye tools.
"""
# Map of residue name -> IUPAC molecule name to generate.
molecules = { 'BEN' : 'benzene',
              'TOL' : 'toluene' }
from openeye import oechem
from openeye import oeomega
from openeye import oeiupac
from openeye import oequacpac
# Create molecules.
for resname in molecules:
name = molecules[resname]
print(name)
# Create molecule from IUPAC name.
molecule = oechem.OEMol()
oeiupac.OEParseIUPACName(molecule, name)
molecule.SetTitle(name)
# Normalize molecule.
oechem.OEAddExplicitHydrogens(molecule)
oechem.OETriposAtomNames(molecule)
oechem.OEAssignAromaticFlags(molecule, oechem.OEAroModelOpenEye)
# Create configuration.
omega = oeomega.OEOmega()
omega.SetStrictStereo(True)
omega.SetIncludeInput(False)
omega(molecule)
# Create charges.
oequacpac.OEAssignPartialCharges(molecule, oequacpac.OECharges_AM1BCCSym)
# Write molecule.
filename = '%s.tripos.mol2' % name
print(filename)
ofs = oechem.oemolostream()
ofs.open(filename)
oechem.OEWriteMolecule(ofs, molecule)
ofs.close()
# Replace <0> with resname.
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
newlines = [line.replace('<0>', resname) for line in lines]
outfile = open(filename, 'w')
outfile.writelines(newlines)
outfile.close()
| mit | e034a0b9d6c0e49acd09060e0fca943d | 23.875 | 77 | 0.694185 | 3.194954 | false | false | false | false |
choderalab/openmmtools | openmmtools/forces.py | 1 | 56521 | #!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Custom OpenMM Forces classes and utilities.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import abc
import collections
import copy
import inspect
import logging
import math
import re
import scipy
import numpy as np
try:
import openmm
from openmm import unit
except ImportError: # OpenMM < 7.6
from simtk import openmm, unit
from openmmtools import utils
from openmmtools.constants import ONE_4PI_EPS0, STANDARD_STATE_VOLUME
logger = logging.getLogger(__name__)
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
class MultipleForcesError(Exception):
    """Raised when more than one force of the requested class is present."""
class NoForceFoundError(Exception):
    """Raised when no force matching the given criteria is present."""
def iterate_forces(system):
    """Iterate over the forces in the system, restoring their Python interface.

    Parameters
    ----------
    system : openmm.System
        The system whose forces are iterated.

    Yields
    ------
    force : openmm.Force
        Each force in the system, after
        ``utils.RestorableOpenMMObject.restore_interface`` has been applied
        to it.
    """
    # A generator function already produces an empty iterator when the loop
    # body never runs, so no explicit trailing ``return`` is needed.
    for force in system.getForces():
        utils.RestorableOpenMMObject.restore_interface(force)
        yield force
def find_forces(system, force_type, only_one=False, include_subclasses=False):
    """Find all the ``Force`` object of a given type in an OpenMM system.

    Parameters
    ----------
    system : openmm.System
        The system to search.
    force_type : str, or type
        The class of the force to search, or a regular expression that
        is used to match its name. Note that ``re.match()`` is used in
        this case, not ``re.search()``. The ``iter_subclasses`` argument
        must be False when this is a string.
    only_one : bool
        If True, an exception is raised when multiple forces of the same
        type are found in the system, and only a single force is returned.
    include_subclasses : bool, optional
        If True, all forces inheriting from ``force_type`` are returned
        as well (default is False). This can't be enabled if `force_type``
        is not a class.

    Returns
    -------
    forces : OrderedDict or tuple
        If ``only_one`` is False, a dictionary force_index: force is returned
        with all the forces matching the criteria. Otherwise, a single pair
        ``(force_idx, force)`` is returned.

    Raises
    ------
    NoForceFoundError
        If ``only_one`` is True and no forces matching the criteria are found.
    MultipleForcesError
        If ``only_one`` is True and multiple forces matching the criteria
        are found.

    Examples
    --------
    The ``only_one`` flag can be used to retrieve a single force.

    >>> from openmmtools import testsystems
    >>> system = testsystems.TolueneVacuum().system
    >>> force_index, force = find_forces(system, openmm.NonbondedForce, only_one=True)
    >>> force.__class__.__name__
    'NonbondedForce'

    It is possible to search for force subclasses.

    >>> class MyHarmonicForce(utils.RestorableOpenMMObject, openmm.CustomBondForce):
    ...     pass
    >>> force_idx = system.addForce(openmm.CustomBondForce('0.0'))
    >>> force_idx = system.addForce(MyHarmonicForce('0.0'))
    >>> forces = find_forces(system, openmm.CustomBondForce, include_subclasses=True)
    >>> [(force_idx, force.__class__.__name__) for force_idx, force in forces.items()]
    [(5, 'CustomBondForce'), (6, 'MyHarmonicForce')]

    A regular expression can be used instead of a class.

    >>> forces = find_forces(system, 'HarmonicAngleForce')
    >>> [(force_idx, force.__class__.__name__) for force_idx, force in forces.items()]
    [(1, 'HarmonicAngleForce')]

    >>> forces = find_forces(system, '.*Harmonic.*')
    >>> [(force_idx, force.__class__.__name__) for force_idx, force in forces.items()]
    [(0, 'HarmonicBondForce'), (1, 'HarmonicAngleForce'), (6, 'MyHarmonicForce')]

    """
    # Interpret force_type as a regular expression when it is not a class.
    name_pattern = None if inspect.isclass(force_type) else re.compile(force_type)

    # First pass: collect the forces that match by name or class.
    matching = {}
    for force_idx, candidate in enumerate(iterate_forces(system)):
        if name_pattern is not None:
            if name_pattern.match(candidate.__class__.__name__):
                matching[force_idx] = candidate
        elif type(candidate) is force_type or (include_subclasses and
                                               isinstance(candidate, force_type)):
            matching[force_idx] = candidate

    # When matching by name, a second pass finds subclasses of the matched classes.
    if include_subclasses and name_pattern is not None:
        matched_classes = [force.__class__ for force in matching.values()]
        for force_idx, candidate in enumerate(iterate_forces(system)):
            if force_idx in matching:
                continue
            if any(isinstance(candidate, matched_class) for matched_class in matched_classes):
                matching[force_idx] = candidate

    # Reorder forces by index.
    matching = collections.OrderedDict(sorted(matching.items()))

    # Handle only_one.
    if only_one is True:
        if len(matching) == 0:
            raise NoForceFoundError('No force of type {} could be found.'.format(force_type))
        if len(matching) > 1:
            raise MultipleForcesError('Found multiple forces of type {}'.format(force_type))
        return matching.popitem(last=False)

    return matching
def _compute_sphere_volume(radius):
"""Compute the volume of a square well restraint."""
return 4.0 / 3 * np.pi * radius**3
def _compute_harmonic_volume(radius, spring_constant, beta):
    """Compute the effective volume of a harmonic potential from 0 to radius.

    Parameters
    ----------
    radius : openmm.unit.Quantity
        The upper limit on the distance (units of length).
    spring_constant : openmm.unit.Quantity
        The spring constant of the harmonic potential (units of
        energy/mole/length^2).
    beta : openmm.unit.Quantity
        Thermodynamic beta (units of mole/energy).

    Returns
    -------
    volume : openmm.unit.Quantity
        The volume of the harmonic potential (units of length^3).
    """
    # Work in dimensionless units: nm for lengths, kJ/mol for energies.
    length_unit = unit.nanometers
    energy_unit = unit.kilojoules_per_mole
    r = radius / length_unit
    dimensionless_beta = beta * energy_unit
    k = spring_constant / (energy_unit / length_unit**2)

    bk = dimensionless_beta * k
    bkr2_2 = (bk / 2) * r**2
    # Closed-form integral of r^2 * exp(-beta*k*r^2/2) from 0 to radius,
    # multiplied by 4*pi below to account for the spherical shell.
    shell_integral = math.sqrt(math.pi / 2) * math.erf(math.sqrt(bkr2_2)) / bk**(3.0 / 2)
    shell_integral -= math.exp(-bkr2_2) * r / bk
    return 4 * math.pi * shell_integral * length_unit**3
def _compute_harmonic_radius(spring_constant, potential_energy):
    """Find the radius at which the harmonic potential reaches the given energy.

    Parameters
    ----------
    spring_constant : openmm.unit.Quantity
        The spring constant of the harmonic potential (units of
        energy/mole/length^2).
    potential_energy : openmm.unit.Quantity
        The energy of the harmonic restraint (units of energy/mole).

    Returns
    -------
    radius : openmm.unit.Quantity
        The radius at which the harmonic potential equals ``potential_energy``.
    """
    # Solve k*r^2/2 == potential_energy for r, working in nanometers.
    length_unit = unit.nanometers
    k = spring_constant * length_unit**2
    return math.sqrt(2 * potential_energy / k) * length_unit
# =============================================================================
# GENERIC CLASSES FOR RADIALLY SYMMETRIC RECEPTOR-LIGAND RESTRAINTS
# =============================================================================
class RadiallySymmetricRestraintForce(utils.RestorableOpenMMObject):
"""Base class for radially-symmetric restraint force.
Provide facility functions to compute the standard state correction
of a receptor-ligand restraint.
To create a subclass, implement the properties :func:`restrained_atom_indices1`
and :func:`restrained_atom_indices2` (with their setters) that return
the indices of the restrained atoms.
You will also have to implement :func:`_create_bond`, which should add
the bond using the correct function/signature.
Optionally, you can implement :func:`distance_at_energy` if an
analytical expression for distance(potential_energy) exists.
If you subclass this, and plan on adding additional global parameters,
you need to invoke this class ``super().__init__`` first as the
``controlling_parameter_name`` must be the first global variable.
Parameters
----------
restraint_parameters : OrderedDict
An ordered dictionary containing the bond parameters in the form
parameter_name: parameter_value. The order is important to make
sure that parameters can be retrieved from the bond force with
the correct force index.
restrained_atom_indices1 : iterable of int
The indices of the first group of atoms to restrain.
restrained_atom_indices2 : iterable of int
The indices of the second group of atoms to restrain.
controlling_parameter_name : str
The name of the global parameter controlling the energy function.
*args, **kwargs
Parameters to pass to the super constructor.
Attributes
----------
controlling_parameter_name
"""
    def __init__(self, restraint_parameters, restrained_atom_indices1,
                 restrained_atom_indices2, controlling_parameter_name,
                 *args, **kwargs):
        """Initialize the restraint force (see class docstring for parameters)."""
        super(RadiallySymmetricRestraintForce, self).__init__(*args, **kwargs)
        # Unzip bond parameters names and values from dict.
        # Ordering matters: the per-bond parameter values added by
        # _create_bond must line up with the names registered below, so an
        # OrderedDict is required when there is more than one parameter.
        assert len(restraint_parameters) == 1 or isinstance(restraint_parameters, collections.OrderedDict)
        parameter_names, parameter_values = zip(*restraint_parameters.items())
        # Let the subclass initialize its bond.
        self._create_bond(parameter_values, restrained_atom_indices1, restrained_atom_indices2)
        # Add parameters. First global parameter is _restorable_force__class_hash
        # from the RestorableOpenMMObject class. The controlling parameter must
        # be global parameter index 1 (see controlling_parameter_name property).
        err_msg = ('The force should have a single global parameter at this point. '
                   'This is likely because the subclass called addGlobalParameter '
                   'before calling super().__init__')
        assert self.getNumGlobalParameters() == 1, err_msg
        self.addGlobalParameter(controlling_parameter_name, 1.0)
        for parameter in parameter_names:
            self.addPerBondParameter(parameter)
# -------------------------------------------------------------------------
# Abstract methods.
# -------------------------------------------------------------------------
    @abc.abstractmethod
    def _create_bond(self, bond_parameter_values, restrained_atom_indices1,
                     restrained_atom_indices2):
        """Create the bond modelling the restraint.

        Subclasses must add the bond with their force-specific function and
        signature (see class docstring). Called once from ``__init__``.

        Parameters
        ----------
        bond_parameter_values : list of floats
            The list of the parameter values of the bond.
        restrained_atom_indices1 : list of int
            The indices of the first group of atoms to restrain.
        restrained_atom_indices2 : list of int
            The indices of the second group of atoms to restrain.
        """
        pass
# -------------------------------------------------------------------------
# Properties.
# -------------------------------------------------------------------------
@abc.abstractproperty
def restrained_atom_indices1(self):
"""list: The indices of the first group of restrained atoms."""
pass
@abc.abstractproperty
def restrained_atom_indices2(self):
"""list: The indices of the first group of restrained atoms."""
pass
@property
def restraint_parameters(self):
"""OrderedDict: The restraint parameters in dictionary form."""
parameter_values = self.getBondParameters(0)[-1]
restraint_parameters = [(self.getPerBondParameterName(parameter_idx), parameter_value)
for parameter_idx, parameter_value in enumerate(parameter_values)]
return collections.OrderedDict(restraint_parameters)
    @property
    def controlling_parameter_name(self):
        """str: The name of the global parameter controlling the energy function (read-only)."""
        # Global parameter 0 is the class hash added by RestorableOpenMMObject;
        # __init__ adds the controlling parameter right after it (index 1).
        return self.getGlobalParameterName(1)
    def distance_at_energy(self, potential_energy):
        """Compute the distance at which the potential energy is ``potential_energy``.

        Subclasses may override this when an analytical expression for
        distance(potential_energy) exists (see class docstring).

        Parameters
        ----------
        potential_energy : openmm.unit.Quantity
            The potential energy of the restraint (units of energy/mole).

        Returns
        -------
        distance : openmm.unit.Quantity
            The distance at which the potential energy is ``potential_energy``
            (units of length).

        Raises
        ------
        NotImplementedError
            If the subclass does not provide an analytical expression.
        """
        raise NotImplementedError()
# -------------------------------------------------------------------------
# Methods to compute the standard state correction.
# -------------------------------------------------------------------------
    def compute_standard_state_correction(self, thermodynamic_state, square_well=False,
                                          radius_cutoff=None, energy_cutoff=None,
                                          max_volume=None):
        """Return the standard state correction of the restraint.

        The standard state correction is computed as

            - log(V_standard / V_restraint)

        where V_standard is the volume at standard state concentration and
        V_restraint is the restraint volume. V_restraint is bounded by the
        volume of the periodic box.

        The ``square_well`` parameter, can be used to re-compute the standard
        state correction when removing the bias introduced by the restraint.

        Parameters
        ----------
        thermodynamic_state : states.ThermodynamicState
            The thermodynamic state at which to compute the standard state
            correction.
        square_well : bool, optional
            If True, this computes the standard state correction assuming
            the restraint to obey a square well potential. The energy
            cutoff is still applied to the original energy potential.
        radius_cutoff : openmm.unit.Quantity, optional
            The maximum distance achievable by the restraint (units
            compatible with nanometers). This is equivalent to placing
            a hard wall potential at this distance.
        energy_cutoff : float, optional
            The maximum potential energy achievable by the restraint in kT.
            This is equivalent to placing a hard wall potential at a
            distance such that ``potential_energy(distance) == energy_cutoff``.
        max_volume : openmm.unit.Quantity or 'system', optional
            The volume of the periodic box (units compatible with nanometer**3).
            This must be provided the thermodynamic state is in NPT. If the
            string 'system' is passed, the maximum volume is computed from
            the system box vectors (this has no effect if the system is not
            periodic).

        Returns
        -------
        correction : float
            The unit-less standard state correction in kT at the given
            thermodynamic state.

        Raises
        ------
        TypeError
            If the thermodynamic state is in the NPT ensemble, and
            ``max_volume`` is not provided, or if the system is non-periodic
            and no cutoff is given.

        """
        # Determine restraint bound volume.
        is_npt = thermodynamic_state.pressure is not None
        if max_volume == 'system':
            # ThermodynamicState.volume is None in the NPT ensemble.
            # max_volume will still be None if the system is not periodic.
            max_volume = thermodynamic_state.get_volume(ignore_ensemble=True)
        elif max_volume is None and not is_npt:
            max_volume = thermodynamic_state.volume
        elif max_volume is None:
            raise TypeError('max_volume must be provided with NPT ensemble')
        # Non periodic systems reweighted to a square-well restraint must always have a cutoff.
        if (not thermodynamic_state.is_periodic and square_well is True and
                radius_cutoff is None and energy_cutoff is None and max_volume is None):
            raise TypeError('One between radius_cutoff, energy_cutoff, or max_volume '
                            'must be provided when reweighting non-periodic thermodynamic '
                            'states to a square-well restraint.')
        # If we evaluate the square well potential with no cutoffs,
        # just use the volume of the periodic box.
        if square_well is True and energy_cutoff is None and radius_cutoff is None:
            restraint_volume = max_volume
        # If we evaluate the square well potential with no energy cutoff,
        # this can easily be solved analytically.
        # NOTE(review): when both radius_cutoff and energy_cutoff are given
        # with square_well=True, the energy cutoff is ignored by this branch —
        # confirm this approximation is intended.
        elif square_well is True and radius_cutoff is not None:
            restraint_volume = _compute_sphere_volume(radius_cutoff)
        # Use numerical integration.
        else:
            restraint_volume = self._compute_restraint_volume(
                thermodynamic_state, square_well, radius_cutoff, energy_cutoff)
        # Bound the restraint volume to the periodic box volume.
        if max_volume is not None and restraint_volume > max_volume:
            debug_msg = 'Limiting the restraint volume to {} nm^3 (original was {} nm^3)'
            logger.debug(debug_msg.format(max_volume / unit.nanometers**3,
                                          restraint_volume / unit.nanometers**3))
            restraint_volume = max_volume
        return -math.log(STANDARD_STATE_VOLUME / restraint_volume)
def _compute_restraint_volume(self, thermodynamic_state, square_well,
radius_cutoff, energy_cutoff):
"""Compute the volume of the restraint.
This function is called by ``compute_standard_state_correction()`` when
the standard state correction depends on the restraint potential.
Parameters
----------
thermodynamic_state : states.ThermodynamicState
The thermodynamic state at which to compute the standard state
correction.
square_well : bool, optional
If True, this computes the standard state correction assuming
the restraint to obey a square well potential. The energy
cutoff is still applied to the original energy potential.
radius_cutoff : openmm.unit.Quantity, optional
The maximum distance achievable by the restraint (units
compatible with nanometers). This is equivalent to placing
a hard wall potential at this distance.
energy_cutoff : float, optional
The maximum potential energy achievable by the restraint in kT.
This is equivalent to placing a hard wall potential at a
distance such that ``potential_energy(distance) == energy_cutoff``.
Returns
-------
restraint_volume : openmm.unit.Quantity
The volume of the restraint (units of length^3).
"""
# By default, use numerical integration.
return self._integrate_restraint_volume(thermodynamic_state, square_well,
radius_cutoff, energy_cutoff)
    def _integrate_restraint_volume(self, thermodynamic_state, square_well,
                                    radius_cutoff, energy_cutoff):
        """Compute the restraint volume through numerical integration.

        Parameters
        ----------
        thermodynamic_state : states.ThermodynamicState
            The thermodynamic state at which to compute the standard state
            correction.
        square_well : bool, optional
            If True, this computes the standard state correction assuming
            the restraint to obey a square well potential. The energy
            cutoff is still applied to the original energy potential.
        radius_cutoff : openmm.unit.Quantity, optional
            The maximum distance achievable by the restraint (units
            compatible with nanometers). This is equivalent to placing
            a hard wall potential at this distance.
        energy_cutoff : float, optional
            The maximum potential energy achievable by the restraint in kT.
            This is equivalent to placing a hard wall potential at a
            distance such that ``potential_energy(distance) == energy_cutoff``.

        Returns
        -------
        restraint_volume : openmm.unit.Quantity
            The volume of the restraint (units of length^3).
        """
        distance_unit = unit.nanometer
        # Create a System object containing two particles
        # connected by the restraint force.
        system = openmm.System()
        system.addParticle(1.0 * unit.amu)
        system.addParticle(1.0 * unit.amu)
        # Copy the restraint so the original force is not mutated.
        force = copy.deepcopy(self)
        force.restrained_atom_indices1 = [0]
        force.restrained_atom_indices2 = [1]
        # Disable the PBC for this approximation of the analytical solution.
        force.setUsesPeriodicBoundaryConditions(False)
        system.addForce(force)
        # Create a Reference context to evaluate energies on the CPU.
        integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)
        platform = openmm.Platform.getPlatformByName('Reference')
        context = openmm.Context(system, integrator, platform)
        # Set default positions.
        positions = unit.Quantity(np.zeros([2,3]), distance_unit)
        context.setPositions(positions)
        # Create a function to compute integrand as a function of interparticle separation.
        beta = thermodynamic_state.beta
        def restraint_potential_func(r):
            """Return the potential energy in kT from the distance in nanometers."""
            # Mutates the closed-over positions array in place: only the x
            # coordinate of the second particle is moved.
            positions[1, 0] = r * distance_unit
            context.setPositions(positions)
            state = context.getState(getEnergy=True)
            return beta * state.getPotentialEnergy()
        def integrand(r):
            """
            Spherical-shell Boltzmann weight at separation r.

            Parameters
            ----------
            r : float
                Inter-particle separation in nanometers

            Returns
            -------
            dI : float
                Contribution to the integral (in nm^2).
            """
            potential = restraint_potential_func(r)
            # If above the energy cutoff, this doesn't contribute to the integral.
            if energy_cutoff is not None and potential > energy_cutoff:
                return 0.0
            # Check if we're reweighting to a square well potential.
            if square_well:
                potential = 0.0
            # 4*pi*r^2 is the surface area of the spherical shell at radius r.
            dI = 4.0 * math.pi * r**2 * math.exp(-potential)
            return dI
        # Determine integration limits.
        r_min, r_max, analytical_volume = self._determine_integral_limits(
            thermodynamic_state, radius_cutoff, energy_cutoff, restraint_potential_func)
        # Integrate restraint volume.
        restraint_volume, restraint_volume_error = scipy.integrate.quad(
            lambda r: integrand(r), r_min / distance_unit, r_max / distance_unit)
        # Add back any portion of the integral that was solved analytically.
        restraint_volume = restraint_volume * distance_unit**3 + analytical_volume
        logger.debug("restraint_volume = {} nm^3".format(restraint_volume / distance_unit**3))
        return restraint_volume
def _determine_integral_limits(self, thermodynamic_state, radius_cutoff,
energy_cutoff, potential_energy_func):
"""Determine integration limits for the standard state correction calculation.
This is called by ``_integrate_restraint_volume()`` to determine
the limits for numerical integration. This is important if we have
a cutoff as the points evaluated by scipy.integrate.quad are adaptively
chosen, and the hard wall can create numerical problems.
If part of the potential energy function can be computed analytically
you can reduce the integration interval and return a non-zero constant
to be added to the result of the integration.
Parameters
----------
thermodynamic_state : states.ThermodynamicState
The thermodynamic state at which to compute the standard state
correction.
square_well : bool, optional
If True, this computes the standard state correction assuming
the restraint to obey a square well potential. The energy
cutoff is still applied to the original energy potential.
radius_cutoff : openmm.unit.Quantity, optional
The maximum distance achievable by the restraint (units
compatible with nanometers). This is equivalent to placing
a hard wall potential at this distance.
energy_cutoff : float, optional
The maximum potential energy achievable by the restraint in kT.
This is equivalent to placing a hard wall potential at a
distance such that ``potential_energy(distance) == energy_cutoff``.
Returns
-------
r_min : openmm.unit.Quantity
The lower limit for numerical integration.
r_max : openmm.unit.Quantity
The upper limit for numerical integration.
analytical_volume : openmm.unit.Quantity
Volume excluded from the numerical integration that has been
computed analytically. This will be summed to the volume
computed through numerical integration.
"""
distance_unit = unit.nanometers
# The lower limit is always 0. Find the upper limit.
r_min = 0.0 * distance_unit
r_max = float('inf')
analytical_volume = 0.0 * distance_unit**3
if radius_cutoff is not None:
r_max = min(r_max, radius_cutoff / distance_unit)
if energy_cutoff is not None:
# First check if an analytical solution is available.
try:
energy_cutoff_distance = self.distance_at_energy(energy_cutoff*thermodynamic_state.kT)
except NotImplementedError:
# Find the first distance that exceeds the cutoff.
potential = 0.0
energy_cutoff_distance = 0.0 # In nanometers.
while potential <= energy_cutoff and energy_cutoff_distance < r_max:
energy_cutoff_distance += 0.1 # 1 Angstrom.
potential = potential_energy_func(energy_cutoff_distance)
r_max = min(r_max, energy_cutoff_distance)
# Handle the case where there are no distance or energy cutoff.
if r_max == float('inf'):
# For periodic systems, take thrice the maximum dimension of the system.
if thermodynamic_state.is_periodic:
box_vectors = thermodynamic_state.default_box_vectors
max_dimension = np.max(unit.Quantity(box_vectors) / distance_unit)
r_max = 3.0 * max_dimension
else:
r_max = 100.0 # distance_unit
r_max *= distance_unit
return r_min, r_max, analytical_volume
class RadiallySymmetricCentroidRestraintForce(RadiallySymmetricRestraintForce,
                                              openmm.CustomCentroidBondForce):
    """Base class for radially-symmetric restraints between the centroids of two groups of atoms.

    The restraint acts between the centers of mass of two groups of
    atoms. Its strength is controlled by a global context parameter
    whose name can be set on construction through the optional argument
    ``controlling_parameter_name``.

    With OpenCL, only on 64bit platforms are supported.

    Parameters
    ----------
    energy_function : str
        The energy function to pass to ``CustomCentroidBondForce``. The
        name of the controlling global parameter will be prepended to
        this expression.
    restraint_parameters : OrderedDict
        An ordered dictionary containing the bond parameters in the form
        parameter_name: parameter_value. The order is important to make
        sure that parameters can be retrieved from the bond force with
        the correct force index.
    restrained_atom_indices1 : iterable of int
        The indices of the first group of atoms to restrain.
    restrained_atom_indices2 : iterable of int
        The indices of the second group of atoms to restrain.
    controlling_parameter_name : str, optional
        The name of the global parameter controlling the energy function.
        The default value is 'lambda_restraints'.

    Attributes
    ----------
    restraint_parameters
    restrained_atom_indices1
    restrained_atom_indices2
    controlling_parameter_name

    """

    def __init__(self, energy_function, restraint_parameters,
                 restrained_atom_indices1, restrained_atom_indices2,
                 controlling_parameter_name='lambda_restraints'):
        # Scale the energy expression by the controlling parameter and
        # initialize CustomCentroidBondForce with two groups per bond.
        scaled_energy_function = controlling_parameter_name + ' * (' + energy_function + ')'
        super().__init__(restraint_parameters, restrained_atom_indices1,
                         restrained_atom_indices2, controlling_parameter_name,
                         2, scaled_energy_function)

    @property
    def restrained_atom_indices1(self):
        """The indices of the first group of restrained atoms."""
        atom_indices, _weights = self.getGroupParameters(0)
        return list(atom_indices)

    @restrained_atom_indices1.setter
    def restrained_atom_indices1(self, atom_indices):
        self.setGroupParameters(0, atom_indices)

    @property
    def restrained_atom_indices2(self):
        """The indices of the second group of restrained atoms."""
        atom_indices, _weights = self.getGroupParameters(1)
        return list(atom_indices)

    @restrained_atom_indices2.setter
    def restrained_atom_indices2(self, atom_indices):
        self.setGroupParameters(1, atom_indices)

    def _create_bond(self, bond_parameter_values, restrained_atom_indices1,
                     restrained_atom_indices2):
        """Create the bond modelling the restraint."""
        # Each set of atoms becomes a centroid group; the bond links them.
        self.addGroup(restrained_atom_indices1)
        self.addGroup(restrained_atom_indices2)
        self.addBond([0, 1], bond_parameter_values)
class RadiallySymmetricBondRestraintForce(RadiallySymmetricRestraintForce,
                                          openmm.CustomBondForce):
    """Base class for radially-symmetric restraints between two atoms.

    This is a version of ``RadiallySymmetricCentroidRestraintForce`` that can
    be used with OpenCL 32-bit platforms. It supports atom groups with only a
    single atom.

    """

    def __init__(self, energy_function, restraint_parameters,
                 restrained_atom_index1, restrained_atom_index2,
                 controlling_parameter_name='lambda_restraints'):
        # CustomBondForce calls the inter-particle distance 'r' rather
        # than 'distance(g1,g2)', so translate the expression first.
        bond_energy_function = energy_function.replace('distance(g1,g2)', 'r')
        bond_energy_function = controlling_parameter_name + ' * (' + bond_energy_function + ')'
        super().__init__(restraint_parameters, [restrained_atom_index1],
                         [restrained_atom_index2], controlling_parameter_name,
                         bond_energy_function)

    # -------------------------------------------------------------------------
    # Public properties.
    # -------------------------------------------------------------------------

    @property
    def restrained_atom_indices1(self):
        """The indices of the first group of restrained atoms."""
        first_atom, _second_atom, _parameters = self.getBondParameters(0)
        return [first_atom]

    @restrained_atom_indices1.setter
    def restrained_atom_indices1(self, atom_indices):
        assert len(atom_indices) == 1
        _first_atom, second_atom, parameters = self.getBondParameters(0)
        self.setBondParameters(0, atom_indices[0], second_atom, parameters)

    @property
    def restrained_atom_indices2(self):
        """The indices of the second group of restrained atoms."""
        _first_atom, second_atom, _parameters = self.getBondParameters(0)
        return [second_atom]

    @restrained_atom_indices2.setter
    def restrained_atom_indices2(self, atom_indices):
        assert len(atom_indices) == 1
        first_atom, _second_atom, parameters = self.getBondParameters(0)
        self.setBondParameters(0, first_atom, atom_indices[0], parameters)

    def _create_bond(self, bond_parameter_values, restrained_atom_indices1, restrained_atom_indices2):
        """Create the bond modelling the restraint."""
        self.addBond(restrained_atom_indices1[0], restrained_atom_indices2[0],
                     bond_parameter_values)
# =============================================================================
# HARMONIC RESTRAINTS
# =============================================================================
class HarmonicRestraintForceMixIn(object):
    """A mix-in providing the interface for harmonic restraints."""

    def __init__(self, spring_constant, *args, **kwargs):
        energy_function = '(K/2)*distance(g1,g2)^2'
        restraint_parameters = collections.OrderedDict([('K', spring_constant)])
        super().__init__(energy_function, restraint_parameters, *args, **kwargs)

    @property
    def spring_constant(self):
        """openmm.unit.Quantity: The spring constant K (units of energy/mole/distance^2)."""
        # getBondParameters works for both CustomBondForce and
        # CustomCentroidBondForce; the parameter values come last.
        bond_parameters = self.getBondParameters(0)[-1]
        return bond_parameters[0] * unit.kilojoule_per_mole/unit.nanometers**2

    def distance_at_energy(self, potential_energy):
        """Compute the distance at which the potential energy is ``potential_energy``.

        Parameters
        ----------
        potential_energy : openmm.unit.Quantity
            The potential energy of the restraint (units of energy/mole).

        Returns
        -------
        distance : openmm.unit.Quantity
            The distance at which the potential energy is ``potential_energy``
            (units of length).

        """
        return _compute_harmonic_radius(self.spring_constant, potential_energy)

    def _compute_restraint_volume(self, thermodynamic_state, square_well,
                                  radius_cutoff, energy_cutoff):
        """Compute the restraint volume analytically."""
        # Without an explicit energy cutoff, integrate up to 100kT.
        effective_energy_cutoff = 100.0 if energy_cutoff is None else energy_cutoff  # In kT.
        radius = self.distance_at_energy(effective_energy_cutoff * thermodynamic_state.kT)
        if radius_cutoff is not None:
            radius = min(radius, radius_cutoff)
        if square_well:
            return _compute_sphere_volume(radius)
        return _compute_harmonic_volume(radius, self.spring_constant,
                                        thermodynamic_state.beta)
class HarmonicRestraintForce(HarmonicRestraintForceMixIn,
                             RadiallySymmetricCentroidRestraintForce):
    """Impose a single harmonic restraint between the centroids of two groups of atoms.

    This restraint can keep a ligand from drifting too far from the
    protein in implicit solvent calculations, or hold it near the
    binding pocket in the decoupled states to improve mixing.

    The restraint acts between the centroids of two groups of atoms,
    typically belonging to the receptor and the ligand. Each centroid
    is the mass-weighted average of the group particle positions.

    The energy expression of the restraint is

    ``E = controlling_parameter * (K/2)*r^2``

    where ``K`` is the spring constant, ``r`` is the distance between
    the two group centroids, and ``controlling_parameter`` is a scale
    factor that can be used to control the strength of the restraint.

    With OpenCL, only on 64bit platforms are supported.

    Parameters
    ----------
    spring_constant : openmm.unit.Quantity
        The spring constant K (see energy expression above) in units
        compatible with joule/nanometer**2/mole.
    restrained_atom_indices1 : iterable of int
        The indices of the first group of atoms to restrain.
    restrained_atom_indices2 : iterable of int
        The indices of the second group of atoms to restrain.
    controlling_parameter_name : str, optional
        The name of the global parameter controlling the energy function.
        The default value is 'lambda_restraints'.

    Attributes
    ----------
    spring_constant
    restrained_atom_indices1
    restrained_atom_indices2
    restraint_parameters
    controlling_parameter_name

    """
    # All functionality is inherited from the mix-in and base classes.
    pass
class HarmonicRestraintBondForce(HarmonicRestraintForceMixIn,
                                 RadiallySymmetricBondRestraintForce):
    """Impose a single harmonic restraint between two atoms.

    This is a version of ``HarmonicRestraintForce`` that can be used with
    OpenCL 32-bit platforms. It supports atom groups with only a single atom.

    Parameters
    ----------
    spring_constant : openmm.unit.Quantity
        The spring constant K (see energy expression above) in units
        compatible with joule/nanometer**2/mole.
    restrained_atom_index1 : int
        The index of the first atom to restrain.
    restrained_atom_index2 : int
        The index of the second atom to restrain.
    controlling_parameter_name : str, optional
        The name of the global parameter controlling the energy function.
        The default value is 'lambda_restraints'.

    Attributes
    ----------
    spring_constant
    restrained_atom_indices1
    restrained_atom_indices2
    restraint_parameters
    controlling_parameter_name

    """
    # All functionality is inherited from the mix-in and base classes.
    pass
# =============================================================================
# FLAT-BOTTOM RESTRAINTS
# =============================================================================
class FlatBottomRestraintForceMixIn(object):
    """A mix-in providing the interface for flat-bottom restraints."""

    def __init__(self, spring_constant, well_radius, *args, **kwargs):
        energy_function = 'step(distance(g1,g2)-r0) * (K/2)*(distance(g1,g2)-r0)^2'
        restraint_parameters = collections.OrderedDict([
            ('K', spring_constant),
            ('r0', well_radius)
        ])
        super().__init__(energy_function, restraint_parameters, *args, **kwargs)

    @property
    def spring_constant(self):
        """openmm.unit.Quantity: The spring constant K (units of energy/mole/length^2)."""
        # getBondParameters works for both CustomBondForce and
        # CustomCentroidBondForce; the parameter values come last.
        bond_parameters = self.getBondParameters(0)[-1]
        return bond_parameters[0] * unit.kilojoule_per_mole/unit.nanometers**2

    @property
    def well_radius(self):
        """openmm.unit.Quantity: The distance at which the harmonic restraint is imposed (units of length)."""
        # r0 is the second bond parameter (see the OrderedDict in __init__).
        bond_parameters = self.getBondParameters(0)[-1]
        return bond_parameters[1] * unit.nanometers

    def distance_at_energy(self, potential_energy):
        """Compute the distance at which the potential energy is ``potential_energy``.

        Parameters
        ----------
        potential_energy : openmm.unit.Quantity
            The potential energy of the restraint (units of energy/mole).

        Returns
        -------
        distance : openmm.unit.Quantity
            The distance at which the potential energy is ``potential_energy``
            (units of length).

        """
        # Inside the well the energy is identically zero, so zero energy does
        # not correspond to a unique distance.
        if potential_energy == 0.0*unit.kilojoules_per_mole:
            raise ValueError('Cannot compute the distance at this potential energy.')
        harmonic_radius = _compute_harmonic_radius(self.spring_constant, potential_energy)
        return self.well_radius + harmonic_radius

    def _compute_restraint_volume(self, thermodynamic_state, square_well,
                                  radius_cutoff, energy_cutoff):
        """Compute the restraint volume analytically."""
        if square_well:
            # A square well has a purely spherical volume of radius r_max,
            # so we can skip the numerical integration.
            _, r_max, _ = self._determine_integral_limits(
                thermodynamic_state, radius_cutoff, energy_cutoff)
            return _compute_sphere_volume(r_max)
        # Otherwise fall back to numerical integration.
        return self._integrate_restraint_volume(thermodynamic_state, square_well,
                                                radius_cutoff, energy_cutoff)

    def _determine_integral_limits(self, thermodynamic_state, radius_cutoff,
                                   energy_cutoff, potential_energy_func=None):
        # Without an explicit energy cutoff, integrate up to 100kT.
        if energy_cutoff is None:
            energy_cutoff = 100.0  # kT
        energy_cutoff = energy_cutoff * thermodynamic_state.kT
        # The energy cutoff distance lies in the harmonic region past r0.
        r_max = self.well_radius + _compute_harmonic_radius(self.spring_constant, energy_cutoff)
        if radius_cutoff is not None:
            r_max = min(r_max, radius_cutoff)
        # The flat-bottom part of the potential contributes a spherical
        # volume that is known analytically.
        r_min = min(r_max, self.well_radius)
        analytical_volume = _compute_sphere_volume(r_min)
        return r_min, r_max, analytical_volume
class FlatBottomRestraintForce(FlatBottomRestraintForceMixIn,
                               RadiallySymmetricCentroidRestraintForce):
    """A restraint between the centroids of two groups of atoms using a flat potential well with harmonic walls.

    An alternative to simple harmonic receptor-ligand restraints: the
    potential is flat inside most of the protein volume and becomes
    harmonic only beyond a chosen radius. This keeps the ligand from
    drifting too far from the protein in implicit solvent calculations
    while still letting it explore the protein surface for putative
    binding sites.

    The restraint acts between the centroids of two groups of atoms,
    typically belonging to the receptor and the ligand. Each centroid
    is the mass-weighted average of the group particle positions.

    More precisely, the energy expression of the restraint is

    ``E = controlling_parameter * step(r-r0) * (K/2)*(r-r0)^2``

    where ``K`` is the spring constant, ``r`` is the distance between
    the restrained atoms, ``r0`` is the distance at which the restraint
    starts being imposed, and ``controlling_parameter`` is a scale
    factor that can be used to control the strength of the restraint.

    With OpenCL, only on 64bit platforms are supported.

    Parameters
    ----------
    spring_constant : openmm.unit.Quantity
        The spring constant K (see energy expression above) in units
        compatible with joule/nanometer**2/mole.
    well_radius : openmm.unit.Quantity
        The distance r0 (see energy expression above) at which the harmonic
        restraint is imposed in units of distance.
    restrained_atom_indices1 : iterable of int
        The indices of the first group of atoms to restrain.
    restrained_atom_indices2 : iterable of int
        The indices of the second group of atoms to restrain.
    controlling_parameter_name : str, optional
        The name of the global parameter controlling the energy function.
        The default value is 'lambda_restraints'.

    Attributes
    ----------
    spring_constant
    well_radius
    restrained_atom_indices1
    restrained_atom_indices2
    restraint_parameters
    controlling_parameter_name

    """
    # All functionality is inherited from the mix-in and base classes.
    pass
class FlatBottomRestraintBondForce(FlatBottomRestraintForceMixIn,
                                   RadiallySymmetricBondRestraintForce):
    """A restraint between two atoms using a flat potential well with harmonic walls.

    This is a version of ``FlatBottomRestraintForce`` that can be used with
    OpenCL 32-bit platforms. It supports atom groups with only a single atom.

    Parameters
    ----------
    spring_constant : openmm.unit.Quantity
        The spring constant K (see energy expression above) in units
        compatible with joule/nanometer**2/mole.
    well_radius : openmm.unit.Quantity
        The distance r0 (see energy expression above) at which the harmonic
        restraint is imposed in units of distance.
    restrained_atom_index1 : int
        The index of the first atom to restrain.
    restrained_atom_index2 : int
        The index of the second atom to restrain.
    controlling_parameter_name : str, optional
        The name of the global parameter controlling the energy function.
        The default value is 'lambda_restraints'.

    Attributes
    ----------
    spring_constant
    well_radius
    restrained_atom_indices1
    restrained_atom_indices2
    restraint_parameters
    controlling_parameter_name

    """
    # All functionality is inherited from the mix-in and base classes.
    pass
# =============================================================================
# REACTION FIELD
# =============================================================================
class UnshiftedReactionFieldForce(openmm.CustomNonbondedForce):
    """A force modelling switched reaction-field electrostatics.

    Unlike a normal ``NonbondedForce`` with the ``CutoffPeriodic`` nonbonded
    method, this force sets ``c_rf`` to 0.0 and relies on a switching
    function to avoid force discontinuities at the cutoff distance.

    Parameters
    ----------
    cutoff_distance : openmm.unit.Quantity, default 15*angstroms
        The cutoff distance (units of distance).
    switch_width : openmm.unit.Quantity, default 1.0*angstrom
        Switch width for electrostatics (units of distance).
    reaction_field_dielectric : float
        The dielectric constant used for the solvent.

    """

    def __init__(self, cutoff_distance=15*unit.angstroms, switch_width=1.0*unit.angstrom,
                 reaction_field_dielectric=78.3):
        # Reaction-field constant k_rf (units of 1/length^3).
        k_rf = cutoff_distance**(-3) * (reaction_field_dielectric - 1.0) / (2.0*reaction_field_dielectric + 1.0)
        # Assemble the energy expression; the c_rf constant term is omitted.
        expression_terms = [
            "ONE_4PI_EPS0*chargeprod*(r^(-1) + k_rf*r^2);",
            "chargeprod = charge1*charge2;",
            "k_rf = {:f};".format(k_rf.value_in_unit_system(unit.md_unit_system)),
            "ONE_4PI_EPS0 = {:f};".format(ONE_4PI_EPS0),  # already in OpenMM units
        ]
        # Create CustomNonbondedForce.
        super().__init__("".join(expression_terms))
        # Each particle carries a single per-particle parameter: its charge.
        self.addPerParticleParameter("charge")
        # Configure cutoff and switching behavior.
        self.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffPeriodic)
        self.setCutoffDistance(cutoff_distance)
        self.setUseLongRangeCorrection(False)
        if switch_width is None:
            # Truncated potential without a switching function.
            self.setUseSwitchingFunction(False)
        else:
            self.setUseSwitchingFunction(True)
            self.setSwitchingDistance(cutoff_distance - switch_width)

    @classmethod
    def from_nonbonded_force(cls, nonbonded_force, switch_width=1.0*unit.angstrom):
        """Copy constructor from an OpenMM `NonbondedForce`.

        The returned force has the same cutoff distance and dielectric, and
        its particles carry the same charges. Exclusions corresponding to
        `nonbonded_force` exceptions are also added.

        .. warning
            This only creates the force object. The electrostatics in
            `nonbonded_force` remains unmodified. Use the function
            `replace_reaction_field` to correctly convert a system to
            use an unshifted reaction field potential.

        Parameters
        ----------
        nonbonded_force : openmm.NonbondedForce
            The nonbonded force to copy.
        switch_width : openmm.unit.Quantity
            Switch width for electrostatics (units of distance).

        Returns
        -------
        reaction_field_force : UnshiftedReactionFieldForce
            The reaction field force with copied particles.

        """
        # OpenMM gives unitless values.
        cutoff_distance = nonbonded_force.getCutoffDistance()
        reaction_field_dielectric = nonbonded_force.getReactionFieldDielectric()
        reaction_field_force = cls(cutoff_distance, switch_width, reaction_field_dielectric)
        # Copy the particle charges (sigma/epsilon are not used here).
        for particle_idx in range(nonbonded_force.getNumParticles()):
            charge = nonbonded_force.getParticleParameters(particle_idx)[0]
            reaction_field_force.addParticle([charge])
        # Translate NonbondedForce exceptions into exclusions.
        for exception_idx in range(nonbonded_force.getNumExceptions()):
            exception_parameters = nonbonded_force.getExceptionParameters(exception_idx)
            iatom, jatom = exception_parameters[0], exception_parameters[1]
            reaction_field_force.addExclusion(iatom, jatom)
        return reaction_field_force

    @classmethod
    def from_system(cls, system, switch_width=1.0*unit.angstrom):
        """Copy constructor from the first OpenMM `NonbondedForce` in `system`.

        If multiple `NonbondedForce`s are found, an exception is raised.

        .. warning
            This only creates the force object. The electrostatics in
            `nonbonded_force` remains unmodified. Use the function
            `replace_reaction_field` to correctly convert a system to
            use an unshifted reaction field potential.

        Parameters
        ----------
        system : openmm.System
            The system containing the nonbonded force to copy.
        switch_width : openmm.unit.Quantity
            Switch width for electrostatics (units of distance).

        Returns
        -------
        reaction_field_force : UnshiftedReactionFieldForce
            The reaction field force.

        See Also
        --------
        UnshiftedReactionField.from_nonbonded_force

        """
        _force_idx, nonbonded_force = find_forces(system, openmm.NonbondedForce, only_one=True)
        return cls.from_nonbonded_force(nonbonded_force, switch_width)
class SwitchedReactionFieldForce(openmm.CustomNonbondedForce):
    """A force modelling switched reaction-field electrostatics.

    Parameters
    ----------
    cutoff_distance : openmm.unit.Quantity, default 15*angstroms
        The cutoff distance (units of distance).
    switch_width : openmm.unit.Quantity, default 1.0*angstrom
        Switch width for electrostatics (units of distance).
    reaction_field_dielectric : float
        The dielectric constant used for the solvent.
    """

    def __init__(self, cutoff_distance=15*unit.angstroms, switch_width=1.0*unit.angstrom,
                 reaction_field_dielectric=78.3):
        k_rf = cutoff_distance**(-3) * (reaction_field_dielectric - 1.0) / (2.0*reaction_field_dielectric + 1.0)
        c_rf = cutoff_distance**(-1) * (3*reaction_field_dielectric) / (2.0*reaction_field_dielectric + 1.0)
        # Energy expression includes the c_rf constant term (contrarily to
        # UnshiftedReactionFieldForce, which omits it).
        energy_expression = "ONE_4PI_EPS0*chargeprod*(r^(-1) + k_rf*r^2 - c_rf);"
        energy_expression += "chargeprod = charge1*charge2;"
        energy_expression += "k_rf = {:f};".format(k_rf.value_in_unit_system(unit.md_unit_system))
        energy_expression += "c_rf = {:f};".format(c_rf.value_in_unit_system(unit.md_unit_system))
        energy_expression += "ONE_4PI_EPS0 = {:f};".format(ONE_4PI_EPS0)  # already in OpenMM units
        # Create CustomNonbondedForce.
        super(SwitchedReactionFieldForce, self).__init__(energy_expression)
        # Add parameters.
        self.addPerParticleParameter("charge")
        # Configure force.
        self.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffPeriodic)
        self.setCutoffDistance(cutoff_distance)
        self.setUseLongRangeCorrection(False)
        if switch_width is not None:
            self.setUseSwitchingFunction(True)
            self.setSwitchingDistance(cutoff_distance - switch_width)
        else:  # Truncated
            self.setUseSwitchingFunction(False)

    @classmethod
    def from_nonbonded_force(cls, nonbonded_force, switch_width=1.0*unit.angstrom):
        """Copy constructor from an OpenMM `NonbondedForce`.

        The returned force has same cutoff distance and dielectric, and
        its particles have the same charges. Exclusions corresponding to
        `nonbonded_force` exceptions are also added.

        .. warning
            This only creates the force object. The electrostatics in
            `nonbonded_force` remains unmodified. Use the function
            `replace_reaction_field` to correctly convert a system to
            use a reaction field potential.

        Parameters
        ----------
        nonbonded_force : openmm.NonbondedForce
            The nonbonded force to copy.
        switch_width : openmm.unit.Quantity
            Switch width for electrostatics (units of distance).

        Returns
        -------
        reaction_field_force : SwitchedReactionFieldForce
            The reaction field force with copied particles.

        """
        # OpenMM gives unitless values.
        cutoff_distance = nonbonded_force.getCutoffDistance()
        reaction_field_dielectric = nonbonded_force.getReactionFieldDielectric()
        reaction_field_force = cls(cutoff_distance, switch_width, reaction_field_dielectric)
        # Set particle charges.
        for particle_index in range(nonbonded_force.getNumParticles()):
            charge, sigma, epsilon = nonbonded_force.getParticleParameters(particle_index)
            reaction_field_force.addParticle([charge])
        # Add exclusions to CustomNonbondedForce.
        for exception_index in range(nonbonded_force.getNumExceptions()):
            iatom, jatom, chargeprod, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)
            reaction_field_force.addExclusion(iatom, jatom)
        return reaction_field_force

    @classmethod
    def from_system(cls, system, switch_width=1.0*unit.angstrom):
        """Copy constructor from the first OpenMM `NonbondedForce` in `system`.

        If multiple `NonbondedForce`s are found, an exception is raised.

        .. warning
            This only creates the force object. The electrostatics in
            `nonbonded_force` remains unmodified. Use the function
            `replace_reaction_field` to correctly convert a system to
            use a reaction field potential.

        Parameters
        ----------
        system : openmm.System
            The system containing the nonbonded force to copy.
        switch_width : openmm.unit.Quantity
            Switch width for electrostatics (units of distance).

        Returns
        -------
        reaction_field_force : SwitchedReactionFieldForce
            The reaction field force.

        See Also
        --------
        SwitchedReactionFieldForce.from_nonbonded_force

        """
        force_idx, nonbonded_force = find_forces(system, openmm.NonbondedForce, only_one=True)
        return cls.from_nonbonded_force(nonbonded_force, switch_width)
if __name__ == '__main__':
    # When executed directly, run the usage examples embedded in the module's
    # docstrings as doctests.
    import doctest
    doctest.testmod()
choderalab/openmmtools | openmmtools/multistate/replicaexchange.py | 1 | 22206 | #!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
ReplicaExchangeSampler
======================
Derived multi-thermodynamic state multistate class with exchanging configurations between replicas
COPYRIGHT
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
This code is licensed under the latest available version of the MIT License.
"""
# ==============================================================================
# GLOBAL IMPORTS
# ==============================================================================
import os
import math
import copy
import logging
import numpy as np
import mdtraj as md
from numba import njit
from openmmtools import multistate, utils
from openmmtools.multistate.multistateanalyzer import MultiStateSamplerAnalyzer
import mpiplus
logger = logging.getLogger(__name__)
# ==============================================================================
# REPLICA-EXCHANGE SIMULATION
# ==============================================================================
class ReplicaExchangeSampler(multistate.MultiStateSampler):
"""Replica-exchange simulation facility.
This MultiStateSampler class provides a general replica-exchange simulation facility,
allowing any set of thermodynamic states to be specified, along with a
set of initial positions to be assigned to the replicas in a round-robin
fashion.
No distinction is made between one-dimensional and multidimensional replica
layout. By default, the replica mixing scheme attempts to mix *all* replicas
to minimize slow diffusion normally found in multidimensional replica exchange
simulations (Modification of the 'replica_mixing_scheme' setting will allow
the traditional 'neighbor swaps only' scheme to be used.)
Stored configurations, energies, swaps, and restart information are all written
to a single output file using the platform portable, robust, and efficient
NetCDF4 library.
Parameters
----------
mcmc_moves : MCMCMove or list of MCMCMove, optional
The MCMCMove used to propagate the states. If a list of MCMCMoves,
they will be assigned to the correspondent thermodynamic state on
creation. If None is provided, Langevin dynamics with 2fm timestep, 5.0/ps collision rate,
and 500 steps per iteration will be used.
number_of_iterations : int or infinity, optional, default: 1
The number of iterations to perform. Both ``float('inf')`` and
``numpy.inf`` are accepted for infinity. If you set this to infinity,
be sure to set also ``online_analysis_interval``.
replica_mixing_scheme : 'swap-all', 'swap-neighbors' or None, Default: 'swap-all'
The scheme used to swap thermodynamic states between replicas.
online_analysis_interval : None or Int >= 1, optional, default None
Choose the interval at which to perform online analysis of the free energy.
After every interval, the simulation will be stopped and the free energy estimated.
If the error in the free energy estimate is at or below ``online_analysis_target_error``, then the simulation
will be considered completed.
online_analysis_target_error : float >= 0, optional, default 0.2
The target error for the online analysis measured in kT per phase.
Once the free energy is at or below this value, the phase will be considered complete.
If ``online_analysis_interval`` is None, this option does nothing.
online_analysis_minimum_iterations : int >= 0, optional, default 50
Set the minimum number of iterations which must pass before online analysis is carried out.
Since the initial samples likely not to yield a good estimate of free energy, save time and just skip them
If ``online_analysis_interval`` is None, this does nothing
Attributes
----------
n_replicas
iteration
mcmc_moves
sampler_states
metadata
is_completed
Examples
--------
Parallel tempering simulation of alanine dipeptide in implicit solvent (replica
exchange among temperatures). This is just an illustrative example; use :class:`ParallelTempering`
class for actual production parallel tempering simulations.
Create the system.
>>> import math
>>> from openmm import unit
>>> from openmmtools import testsystems, states, mcmc
>>> testsystem = testsystems.AlanineDipeptideImplicit()
>>> import os
>>> import tempfile
Create thermodynamic states for parallel tempering with exponentially-spaced schedule.
>>> n_replicas = 3 # Number of temperature replicas.
>>> T_min = 298.0 * unit.kelvin # Minimum temperature.
>>> T_max = 600.0 * unit.kelvin # Maximum temperature.
>>> temperatures = [T_min + (T_max - T_min) * (math.exp(float(i) / float(n_replicas-1)) - 1.0) / (math.e - 1.0)
... for i in range(n_replicas)]
>>> thermodynamic_states = [states.ThermodynamicState(system=testsystem.system, temperature=T)
... for T in temperatures]
Initialize simulation object with options. Run with a GHMC integrator.
>>> move = mcmc.GHMCMove(timestep=2.0*unit.femtoseconds, n_steps=50)
>>> simulation = ReplicaExchangeSampler(mcmc_moves=move, number_of_iterations=2)
Create simulation with its storage file (in a temporary directory) and run.
>>> storage_path = tempfile.NamedTemporaryFile(delete=False).name + '.nc'
>>> reporter = multistate.MultiStateReporter(storage_path, checkpoint_interval=1)
>>> simulation.create(thermodynamic_states=thermodynamic_states,
... sampler_states=states.SamplerState(testsystem.positions),
... storage=reporter)
Please cite the following:
<BLANKLINE>
Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209
Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27
Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413
Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w
Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs multistate: Simple improvements for enhanced mixing. J. Chem. Phys., 135:194110, 2011. DOI:10.1063/1.3660669
<BLANKLINE>
>>> simulation.run() # This runs for a maximum of 2 iterations.
>>> simulation.iteration
2
>>> simulation.run(n_iterations=1)
>>> simulation.iteration
2
To resume a simulation from an existing storage file and extend it beyond
the original number of iterations.
>>> del simulation
>>> simulation = ReplicaExchangeSampler.from_storage(reporter)
Please cite the following:
<BLANKLINE>
Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209
Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27
Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413
Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w
Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs multistate: Simple improvements for enhanced mixing. J. Chem. Phys., 135:194110, 2011. DOI:10.1063/1.3660669
<BLANKLINE>
>>> simulation.extend(n_iterations=1)
>>> simulation.iteration
3
You can extract several information from the NetCDF file using the Reporter
class while the simulation is running. This reads the SamplerStates of every
run iteration.
>>> reporter = multistate.MultiStateReporter(storage=storage_path, open_mode='r', checkpoint_interval=1)
>>> sampler_states = reporter.read_sampler_states(iteration=1)
>>> len(sampler_states)
3
>>> sampler_states[0].positions.shape # Alanine dipeptide has 22 atoms.
(22, 3)
Clean up.
>>> os.remove(storage_path)
:param number_of_iterations: Maximum number of integer iterations that will be run
:param replica_mixing_scheme: Scheme which describes how replicas are exchanged each iteration as string
:param online_analysis_interval: How frequently to carry out online analysis in number of iterations
:param online_analysis_target_error: Target free energy difference error float at which simulation will be stopped during online analysis, in dimensionless energy
:param online_analysis_minimum_iterations: Minimum number of iterations needed before online analysis is run as int
"""
# -------------------------------------------------------------------------
# Constructors.
# -------------------------------------------------------------------------
def __init__(self, replica_mixing_scheme='swap-all', **kwargs):
# Initialize multi-state sampler simulation.
super(ReplicaExchangeSampler, self).__init__(**kwargs)
self.replica_mixing_scheme = replica_mixing_scheme
class _StoredProperty(multistate.MultiStateSampler._StoredProperty):
@staticmethod
def _repex_mixing_scheme_validator(instance, replica_mixing_scheme):
supported_schemes = ['swap-all', 'swap-neighbors', None]
if replica_mixing_scheme not in supported_schemes:
raise ValueError("Unknown replica mixing scheme '{}'. Supported values "
"are {}.".format(replica_mixing_scheme, supported_schemes))
if instance.locality is not None:
if replica_mixing_scheme not in ['swap-neighbors']:
raise ValueError("replica_mixing_scheme must be 'swap-neighbors' if locality is used")
return replica_mixing_scheme
replica_mixing_scheme = _StoredProperty('replica_mixing_scheme',
validate_function=_StoredProperty._repex_mixing_scheme_validator)
_TITLE_TEMPLATE = ('Replica-exchange sampler simulation created using ReplicaExchangeSampler class '
'of openmmtools.multistate on {}')
def _pre_write_create(self, thermodynamic_states, sampler_states, *args, **kwargs):
"""Overwrite parent implementation to make sure the number of
thermodynamic states is equal to the number of sampler states.
"""
# Make sure there are no more sampler states than thermodynamic states.
n_states = len(thermodynamic_states)
if len(sampler_states) > n_states:
raise ValueError('Passed {} SamplerStates but only {} ThermodynamicStates'.format(
len(sampler_states), n_states))
# Distribute sampler states to replicas in a round-robin fashion.
# The sampler states are deep-copied inside super()._pre_write_create().
sampler_states = [sampler_states[i % len(sampler_states)] for i in range(n_states)]
super()._pre_write_create(thermodynamic_states, sampler_states, *args, **kwargs)
@mpiplus.on_single_node(0, broadcast_result=True)
def _mix_replicas(self):
"""Attempt to swap replicas according to user-specified scheme."""
logger.debug("Mixing replicas...")
# Reset storage to keep track of swap attempts this iteration.
self._n_accepted_matrix[:, :] = 0
self._n_proposed_matrix[:, :] = 0
# Perform swap attempts according to requested scheme.
with utils.time_it('Mixing of replicas'):
if self.replica_mixing_scheme == 'swap-neighbors':
self._mix_neighboring_replicas()
elif self.replica_mixing_scheme == 'swap-all':
nswap_attempts = self.n_replicas**3
# Try to use numba-accelerated mixing code if possible,
# otherwise fall back to Python-accelerated code.
try:
self._mix_all_replicas_numba(
nswap_attempts, self.n_replicas,
self._replica_thermodynamic_states, self._energy_thermodynamic_states,
self._n_accepted_matrix, self._n_proposed_matrix
)
except (ValueError, ImportError) as e:
logger.warning(str(e))
self._mix_all_replicas(nswap_attempts)
else:
assert self.replica_mixing_scheme is None
# Determine fraction of swaps accepted this iteration.
n_swaps_proposed = self._n_proposed_matrix.sum()
n_swaps_accepted = self._n_accepted_matrix.sum()
swap_fraction_accepted = 0.0
if n_swaps_proposed > 0:
swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed
logger.debug("Accepted {}/{} attempted swaps ({:.1f}%)".format(n_swaps_accepted, n_swaps_proposed,
swap_fraction_accepted * 100.0))
return self._replica_thermodynamic_states
@staticmethod
@njit
def _mix_all_replicas_numba(
nswap_attempts,
n_replicas, _replica_thermodynamic_states, _energy_thermodynamic_states,
_n_accepted_matrix, _n_proposed_matrix):
"""
numba-accelerated version of _mix_all_replicas()
All arguments must be passed during the function call because of numba jit limitations.
Parameters
----------
nswap_attempts : int
Number of swaps to attempt
n_replicas : int
Number of replicas
_replica_thermodynamic_states : array-like of int of shape [n_replicas]
_replica_thermodynamic_states[replica_index] is the thermodynamic state visited by that replica
_energy_thermodynamic_states : array-like of float of shape [n_replicas, n_replicas]
_energy_thermodynamic_states[replica_index,state_index] is the reduced potential of state ``state_index``
for replica ``replica_index``
_n_accepted_matrix : array-like of float of shape [n_replicas, n_replicas]
_n_accepted_matrix[from_state,to_state] is the number of accepted swaps
_n_proposed_matrix : array-like of float of shape [n_replicas, n_replicas]
_n_accepted_matrix[from_state,to_state] is the number of proposed swaps
"""
for swap_attempt in range(nswap_attempts):
# Choose random replicas uniformly to attempt to swap.
replica_i = np.random.randint(n_replicas)
replica_j = np.random.randint(n_replicas)
# Determine the thermodynamic states associated to these replicas.
thermodynamic_state_i = _replica_thermodynamic_states[replica_i]
thermodynamic_state_j = _replica_thermodynamic_states[replica_j]
# Compute log probability of swap.
energy_ij = _energy_thermodynamic_states[replica_i, thermodynamic_state_j]
energy_ji = _energy_thermodynamic_states[replica_j, thermodynamic_state_i]
energy_ii = _energy_thermodynamic_states[replica_i, thermodynamic_state_i]
energy_jj = _energy_thermodynamic_states[replica_j, thermodynamic_state_j]
log_p_accept = - (energy_ij + energy_ji) + energy_ii + energy_jj
# Record that this move has been proposed.
_n_proposed_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
_n_proposed_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1
# Accept or reject.
if log_p_accept >= 0.0 or np.random.rand() < np.exp(log_p_accept):
# Swap states in replica slots i and j.
_replica_thermodynamic_states[replica_i] = thermodynamic_state_j
_replica_thermodynamic_states[replica_j] = thermodynamic_state_i
# Accumulate statistics.
_n_accepted_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
_n_accepted_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1
def _mix_all_replicas(self, nswap_attempts=100):
"""Exchange all replicas with Python."""
# Determine number of swaps to attempt to ensure thorough mixing.
# TODO: Replace this with analytical result computed to guarantee sufficient mixing, or
# TODO: adjust it based on how many we can afford to do and not have mixing take a
# TODO: substantial fraction of iteration time.
logger.debug("Will attempt to swap all pairs of replicas, using a total of %d attempts." % nswap_attempts)
# Attempt swaps to mix replicas.
for swap_attempt in range(nswap_attempts):
# Choose random replicas uniformly to attempt to swap.
replica_i = np.random.randint(self.n_replicas)
replica_j = np.random.randint(self.n_replicas)
self._attempt_swap(replica_i, replica_j)
def _mix_neighboring_replicas(self):
"""Attempt exchanges between neighboring replicas only."""
logger.debug("Will attempt to swap only neighboring replicas.")
# TODO: Extend this to allow more remote swaps or more thorough mixing if locality > 1.
# Attempt swaps of pairs of replicas using traditional scheme (e.g. [0,1], [2,3], ...).
offset = np.random.randint(2) # Offset is 0 or 1.
for thermodynamic_state_i in range(offset, self.n_replicas-1, 2):
thermodynamic_state_j = thermodynamic_state_i + 1 # Neighboring state.
# Determine which replicas currently hold the thermodynamic states.
replica_i = np.where(self._replica_thermodynamic_states == thermodynamic_state_i)
replica_j = np.where(self._replica_thermodynamic_states == thermodynamic_state_j)
self._attempt_swap(replica_i, replica_j)
def _attempt_swap(self, replica_i, replica_j):
"""Attempt a single exchange between two replicas."""
# Determine the thermodynamic states associated to these replicas.
thermodynamic_state_i = self._replica_thermodynamic_states[replica_i]
thermodynamic_state_j = self._replica_thermodynamic_states[replica_j]
# Compute log probability of swap.
energy_ij = self._energy_thermodynamic_states[replica_i, thermodynamic_state_j]
energy_ji = self._energy_thermodynamic_states[replica_j, thermodynamic_state_i]
energy_ii = self._energy_thermodynamic_states[replica_i, thermodynamic_state_i]
energy_jj = self._energy_thermodynamic_states[replica_j, thermodynamic_state_j]
log_p_accept = - (energy_ij + energy_ji) + energy_ii + energy_jj
# Record that this move has been proposed.
self._n_proposed_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
self._n_proposed_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1
# Accept or reject.
if log_p_accept >= 0.0 or np.random.rand() < math.exp(log_p_accept):
# Swap states in replica slots i and j.
self._replica_thermodynamic_states[replica_i] = thermodynamic_state_j
self._replica_thermodynamic_states[replica_j] = thermodynamic_state_i
# Accumulate statistics.
self._n_accepted_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
self._n_accepted_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1
@mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=False)
def _display_citations(self, overwrite_global=False, citation_stack=None):
"""
Display papers to be cited.
The overwrite_golbal command will force the citation to display even if the "have_citations_been_shown" variable
is True
"""
gibbs_citations = """\
Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs multistate: Simple improvements for enhanced mixing. J. Chem. Phys., 135:194110, 2011. DOI:10.1063/1.3660669
"""
if self.replica_mixing_scheme == 'swap-all':
if citation_stack is None:
citation_stack = [gibbs_citations]
else:
citation_stack = [gibbs_citations] + citation_stack
super()._display_citations(overwrite_global=overwrite_global, citation_stack=citation_stack)
class ReplicaExchangeAnalyzer(MultiStateSamplerAnalyzer):
"""
The ReplicaExchangeAnalyzer is the analyzer for a simulation generated from a Replica Exchange sampler simulation,
implemented as an instance of the :class:`MultiStateSamplerAnalyzer`.
See Also
--------
PhaseAnalyzer
MultiStateSamplerAnalyzer
"""
pass
# ==============================================================================
# MAIN AND TESTS
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | b87025c139b2f8c11518c85bfd4a702d | 48.677852 | 247 | 0.65113 | 4.066288 | false | false | false | false |
davidsandberg/facenet | src/generative/models/dfc_vae_resnet.py | 1 | 6257 | # MIT License
#
# Copyright (c) 2017 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Variational autoencoder based on the paper
'Deep Feature Consistent Variational Autoencoder'
(https://arxiv.org/pdf/1610.00291.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import generative.models.vae_base # @UnresolvedImport
class Vae(generative.models.vae_base.Vae):
def __init__(self, latent_variable_dim):
super(Vae, self).__init__(latent_variable_dim, 64)
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = images
net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
def decoder(self, latent_var, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('decoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
net = tf.reshape(net, [-1,4,4,256], name='Reshape')
net = tf.image.resize_nearest_neighbor(net, size=(8,8), name='Upsample_1')
net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')
net = tf.image.resize_nearest_neighbor(net, size=(16,16), name='Upsample_2')
net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')
net = tf.image.resize_nearest_neighbor(net, size=(32,32), name='Upsample_3')
net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')
net = tf.image.resize_nearest_neighbor(net, size=(64,64), name='Upsample_4')
net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')
return net
def conv2d_block(inp, scale, *args, **kwargs):
return inp + slim.conv2d(inp, *args, **kwargs) * scale
def leaky_relu(x):
return tf.maximum(0.1*x,x)
| mit | edb428727757b13710a7a14fb3a76947 | 55.890909 | 131 | 0.58958 | 3.565242 | false | false | false | false |
althonos/pronto | pronto/synonym.py | 1 | 6009 | # coding: utf-8
import functools
import typing
import weakref
from typing import Iterable, Optional, Set
from .utils.meta import roundrepr, typechecked
from .xref import Xref
if typing.TYPE_CHECKING:
from .ontology import Ontology
_SCOPES = frozenset({"EXACT", "RELATED", "BROAD", "NARROW", None})
__all__ = ["SynonymType", "SynonymData", "Synonym"]
@roundrepr
@functools.total_ordering
class SynonymType(object):
"""A user-defined synonym type."""
id: str
description: str
scope: Optional[str]
__slots__ = ("__weakref__", "id", "description", "scope")
@typechecked()
def __init__(self, id: str, description: str, scope: Optional[str] = None):
if scope not in _SCOPES:
raise ValueError(f"invalid synonym scope: {scope}")
self.id = id
self.description = description
self.scope = scope
def __eq__(self, other):
if isinstance(other, SynonymType):
return self.id == other.id
return False
def __lt__(self, other):
if isinstance(other, SynonymType):
if self.id < other.id:
return True
return self.id == other.id and self.description < other.description
return NotImplemented
def __hash__(self):
return hash((SynonymType, self.id))
@roundrepr
@functools.total_ordering
class SynonymData(object):
"""Internal data storage of `Synonym` information."""
description: str
scope: Optional[str]
type: Optional[str]
xrefs: Set[Xref]
__slots__ = ("__weakref__", "description", "type", "xrefs", "scope")
def __eq__(self, other):
if isinstance(other, SynonymData):
return self.description == other.description and self.scope == other.scope
return False
def __lt__(self, other): # FIXME?
if not isinstance(other, SynonymData):
return NotImplemented
if self.type is not None and other.type is not None:
return (self.description, self.scope, self.type, frozenset(self.xrefs)) < (
self.description,
self.scope,
other.type,
frozenset(other.xrefs),
)
else:
return (self.description, self.scope, frozenset(self.xrefs)) < (
self.description,
self.scope,
frozenset(other.xrefs),
)
def __hash__(self):
return hash((self.description, self.scope))
def __init__(
self,
description: str,
scope: Optional[str] = None,
type: Optional[str] = None,
xrefs: Optional[Iterable[Xref]] = None,
):
if scope not in _SCOPES:
raise ValueError(f"invalid synonym scope: {scope}")
self.description = description
self.scope = scope
self.type = type
self.xrefs = set(xrefs) if xrefs is not None else set()
@functools.total_ordering
class Synonym(object):
"""A synonym for an entity, with respect to the OBO terminology."""
__ontology: "Ontology"
if typing.TYPE_CHECKING:
__data: "weakref.ReferenceType[SynonymData]"
def __init__(self, ontology: "Ontology", data: "SynonymData"):
self.__data = weakref.ref(data)
self.__ontology = ontology
def _data(self) -> SynonymData:
rdata = self.__data()
if rdata is None:
raise RuntimeError("synonym data was deallocated")
return rdata
else:
__slots__: Iterable[str] = ("__weakref__", "__ontology", "_data")
def __init__(self, ontology: "Ontology", syndata: "SynonymData"):
if syndata.type is not None:
if not any(t.id == syndata.type for t in ontology.synonym_types()):
raise ValueError(f"undeclared synonym type: {syndata.type}")
self._data = weakref.ref(syndata)
self.__ontology = ontology
def __eq__(self, other: object):
if isinstance(other, Synonym):
return self._data() == other._data()
return False
def __lt__(self, other: object):
if not isinstance(other, Synonym):
return False
return self._data().__lt__(other._data())
def __hash__(self):
return hash(self._data())
def __repr__(self):
return roundrepr.make(
"Synonym",
self.description,
scope=(self.scope, None),
type=(self.type, None),
xrefs=(self.xrefs, set()),
)
@property
def description(self) -> str:
return self._data().description
@description.setter # type: ignore
@typechecked(property=True)
def description(self, description: str) -> None:
self._data().description = description
@property
def type(self) -> Optional[SynonymType]:
ontology, syndata = self.__ontology, self._data()
if syndata.type is not None:
return next(t for t in ontology.synonym_types() if t.id == syndata.type)
return None
@type.setter # type: ignore
@typechecked(property=True)
def type(self, type_: Optional[SynonymType]) -> None:
synonyms: Iterable[SynonymType] = self.__ontology.synonym_types()
if type_ is not None and not any(type_.id == s.id for s in synonyms):
raise ValueError(f"undeclared synonym type: {type_.id}")
self._data().type = type_.id if type_ is not None else None
@property
def scope(self) -> Optional[str]:
return self._data().scope
@scope.setter # type: ignore
@typechecked(property=True)
def scope(self, scope: Optional[str]):
if scope not in _SCOPES:
raise ValueError(f"invalid synonym scope: {scope}")
self._data().scope = scope
@property
def xrefs(self) -> Set[Xref]:
return self._data().xrefs
@xrefs.setter
def xrefs(self, xrefs: Iterable[Xref]):
self._data().xrefs = set(xrefs)
| mit | 7b57129d3bf0ad05ec33e9a4f729d2c6 | 29.045 | 87 | 0.58246 | 3.942913 | false | false | false | false |
marshmallow-code/marshmallow-sqlalchemy | tests/conftest.py | 1 | 6733 | from types import SimpleNamespace
import datetime as dt
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref, column_property, synonym
class AnotherInteger(sa.Integer):
"""Use me to test if MRO works like we want"""
pass
class AnotherText(sa.types.TypeDecorator):
"""Use me to test if MRO and `impl` virtual type works like we want"""
impl = sa.UnicodeText
@pytest.fixture()
def Base():
return declarative_base()
@pytest.fixture()
def engine():
return sa.create_engine("sqlite:///:memory:", echo=False)
@pytest.fixture()
def session(Base, models, engine):
Session = sessionmaker(bind=engine)
Base.metadata.create_all(bind=engine)
return Session()
@pytest.fixture()
def models(Base):
# models adapted from https://github.com/wtforms/wtforms-sqlalchemy/blob/master/tests/tests.py
student_course = sa.Table(
"student_course",
Base.metadata,
sa.Column("student_id", sa.Integer, sa.ForeignKey("student.id")),
sa.Column("course_id", sa.Integer, sa.ForeignKey("course.id")),
)
class Course(Base):
__tablename__ = "course"
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255), nullable=False)
# These are for better model form testing
cost = sa.Column(sa.Numeric(5, 2), nullable=False)
description = sa.Column(sa.Text, nullable=True)
level = sa.Column(sa.Enum("Primary", "Secondary"))
has_prereqs = sa.Column(sa.Boolean, nullable=False)
started = sa.Column(sa.DateTime, nullable=False)
grade = sa.Column(AnotherInteger, nullable=False)
transcription = sa.Column(AnotherText, nullable=False)
@property
def url(self):
return f"/courses/{self.id}"
class School(Base):
__tablename__ = "school"
id = sa.Column("school_id", sa.Integer, primary_key=True)
name = sa.Column(sa.String(255), nullable=False)
student_ids = association_proxy(
"students", "id", creator=lambda sid: Student(id=sid)
)
@property
def url(self):
return f"/schools/{self.id}"
class Student(Base):
__tablename__ = "student"
id = sa.Column(sa.Integer, primary_key=True)
full_name = sa.Column(sa.String(255), nullable=False, unique=True)
dob = sa.Column(sa.Date(), nullable=True)
date_created = sa.Column(
sa.DateTime, default=dt.datetime.utcnow, doc="date the student was created"
)
current_school_id = sa.Column(
sa.Integer, sa.ForeignKey(School.id), nullable=False
)
current_school = relationship(School, backref=backref("students"))
possible_teachers = association_proxy("current_school", "teachers")
courses = relationship(
Course,
secondary=student_course,
backref=backref("students", lazy="dynamic"),
)
# Test complex column property
subquery = sa.select([sa.func.count(student_course.c.course_id)]).where(
student_course.c.student_id == id
)
if hasattr(subquery, "scalar_subquery"):
subquery = subquery.scalar_subquery()
else: # SQLA < 1.4
subquery = subquery.as_scalar()
course_count = column_property(subquery)
@property
def url(self):
return f"/students/{self.id}"
class Teacher(Base):
__tablename__ = "teacher"
id = sa.Column(sa.Integer, primary_key=True)
full_name = sa.Column(
sa.String(255), nullable=False, unique=True, default="Mr. Noname"
)
current_school_id = sa.Column(
sa.Integer, sa.ForeignKey(School.id), nullable=True
)
current_school = relationship(School, backref=backref("teachers"))
curr_school_id = synonym("current_school_id")
substitute = relationship("SubstituteTeacher", uselist=False, backref="teacher")
@property
def fname(self):
return self.full_name
class SubstituteTeacher(Base):
__tablename__ = "substituteteacher"
id = sa.Column(sa.Integer, sa.ForeignKey("teacher.id"), primary_key=True)
class Paper(Base):
__tablename__ = "paper"
satype = sa.Column(sa.String(50))
__mapper_args__ = {"polymorphic_identity": "paper", "polymorphic_on": satype}
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False, unique=True)
class GradedPaper(Paper):
__tablename__ = "gradedpaper"
__mapper_args__ = {"polymorphic_identity": "gradedpaper"}
id = sa.Column(sa.Integer, sa.ForeignKey("paper.id"), primary_key=True)
marks_available = sa.Column(sa.Integer)
class Seminar(Base):
__tablename__ = "seminar"
title = sa.Column(sa.String, primary_key=True)
semester = sa.Column(sa.String, primary_key=True)
label = column_property(title + ": " + semester)
lecturekeywords_table = sa.Table(
"lecturekeywords",
Base.metadata,
sa.Column("keyword_id", sa.Integer, sa.ForeignKey("keyword.id")),
sa.Column("lecture_id", sa.Integer, sa.ForeignKey("lecture.id")),
)
class Keyword(Base):
__tablename__ = "keyword"
id = sa.Column(sa.Integer, primary_key=True)
keyword = sa.Column(sa.String)
class Lecture(Base):
__tablename__ = "lecture"
__table_args__ = (
sa.ForeignKeyConstraint(
["seminar_title", "seminar_semester"],
["seminar.title", "seminar.semester"],
),
)
id = sa.Column(sa.Integer, primary_key=True)
topic = sa.Column(sa.String)
seminar_title = sa.Column(sa.String, sa.ForeignKey(Seminar.title))
seminar_semester = sa.Column(sa.String, sa.ForeignKey(Seminar.semester))
seminar = relationship(
Seminar, foreign_keys=[seminar_title, seminar_semester], backref="lectures"
)
kw = relationship("Keyword", secondary=lecturekeywords_table)
keywords = association_proxy(
"kw", "keyword", creator=lambda kw: Keyword(keyword=kw)
)
return SimpleNamespace(
Course=Course,
School=School,
Student=Student,
Teacher=Teacher,
SubstituteTeacher=SubstituteTeacher,
Paper=Paper,
GradedPaper=GradedPaper,
Seminar=Seminar,
Lecture=Lecture,
Keyword=Keyword,
)
| mit | fbe86a5226b9ddbbb608b064c8ac2744 | 30.909953 | 98 | 0.614585 | 3.750975 | false | true | false | false |
marshmallow-code/marshmallow-sqlalchemy | src/marshmallow_sqlalchemy/fields.py | 1 | 5284 | import warnings
from marshmallow import fields
from marshmallow.utils import is_iterable_but_not_string
from sqlalchemy import inspect
from sqlalchemy.orm.exc import NoResultFound
def get_primary_keys(model):
    """Return the mapper properties backing the primary key column(s) of a model.

    :param model: SQLAlchemy model class
    :return: list of mapper properties, one per primary-key column
    """
    mapper = model.__mapper__
    # Map each primary-key column back to the ORM property that owns it.
    return list(map(mapper.get_property_by_column, mapper.primary_key))
def ensure_list(value):
    """Wrap ``value`` in a list unless it is already a non-string iterable."""
    if is_iterable_but_not_string(value):
        return value
    return [value]
class RelatedList(fields.List):
    """A list of related entities (e.g. a to-many relationship).

    Element (de)serialization is delegated to the container field,
    typically a :class:`Related`.
    """

    def get_value(self, obj, attr, accessor=None):
        # Do not call `fields.List`'s get_value as it calls the container's
        # `get_value` if the container has `attribute`.
        # Instead call the `get_value` from the parent of `fields.List`
        # so the special handling is avoided.
        return super(fields.List, self).get_value(obj, attr, accessor=accessor)
class Related(fields.Field):
    """Related data represented by a SQLAlchemy `relationship`. Must be attached
    to a :class:`Schema` class whose options includes a SQLAlchemy `model`, such
    as :class:`SQLAlchemySchema`.

    :param list columns: Optional column names on related model. If not provided,
        the primary key(s) of the related model will be used.
    """

    default_error_messages = {
        "invalid": "Could not deserialize related value {value!r}; "
        "expected a dictionary with keys {keys!r}"
    }

    def __init__(self, columns=None, column=None, **kwargs):
        # `column` is the deprecated spelling of `columns`; accept it but warn.
        if column is not None:
            warnings.warn(
                "`column` parameter is deprecated and will be removed in future releases. "
                "Use `columns` instead.",
                DeprecationWarning,
            )
            if columns is None:
                columns = column
        super().__init__(**kwargs)
        # Always store a list, even when a single column name was given.
        self.columns = ensure_list(columns or [])

    @property
    def model(self):
        # The model comes from the owning schema's options.
        return self.root.opts.model

    @property
    def related_model(self):
        """The mapped class on the other side of the relationship."""
        model_attr = getattr(self.model, self.attribute or self.name)
        if hasattr(model_attr, "remote_attr"):  # handle association proxies
            model_attr = model_attr.remote_attr
        return model_attr.property.mapper.class_

    @property
    def related_keys(self):
        """Mapper properties used to identify a related instance: either the
        explicitly configured ``columns`` or the related model's primary key(s)."""
        if self.columns:
            insp = inspect(self.related_model)
            return [insp.attrs[column] for column in self.columns]
        return get_primary_keys(self.related_model)

    @property
    def session(self):
        # Delegated to the root schema.
        return self.root.session

    @property
    def transient(self):
        # Delegated to the root schema.
        return self.root.transient

    def _serialize(self, value, attr, obj):
        # Collapse single-key results to a scalar; multi-key results stay dicts.
        ret = {prop.key: getattr(value, prop.key, None) for prop in self.related_keys}
        return ret if len(ret) > 1 else list(ret.values())[0]

    def _deserialize(self, value, *args, **kwargs):
        """Deserialize a serialized value to a model instance.

        If the parent schema is transient, create a new (transient) instance.
        Otherwise, attempt to find an existing instance in the database.

        :param value: The value to deserialize.
        """
        if not isinstance(value, dict):
            # A scalar is only acceptable for a single related key; promote
            # it to {key: value} form so both paths below are uniform.
            if len(self.related_keys) != 1:
                keys = [prop.key for prop in self.related_keys]
                raise self.make_error("invalid", value=value, keys=keys)
            value = {self.related_keys[0].key: value}
        if self.transient:
            return self.related_model(**value)
        try:
            result = self._get_existing_instance(
                self.session.query(self.related_model), value
            )
        except NoResultFound:
            # The related-object DNE in the DB, but we still want to deserialize it
            # ...perhaps we want to add it to the DB later
            return self.related_model(**value)
        return result

    def _get_existing_instance(self, query, value):
        """Retrieve the related object from an existing instance in the DB.

        :param query: A SQLAlchemy `Query <sqlalchemy.orm.query.Query>` object.
        :param value: The serialized value to map to an existing instance.
        :raises NoResultFound: if there is no matching record.
        """
        if self.columns:
            result = query.filter_by(
                **{prop.key: value.get(prop.key) for prop in self.related_keys}
            ).one()
        else:
            # Use a faster path if the related key is the primary key.
            lookup_values = [value.get(prop.key) for prop in self.related_keys]
            try:
                result = query.get(lookup_values)
            except TypeError:
                # query.get raises TypeError for malformed identity values;
                # surface it as a validation error instead.
                keys = [prop.key for prop in self.related_keys]
                raise self.make_error("invalid", value=value, keys=keys)
            if result is None:
                raise NoResultFound
        return result
class Nested(fields.Nested):
    """Nested field that inherits the session from its parent."""

    def _deserialize(self, *args, **kwargs):
        nested_schema = self.schema
        if hasattr(nested_schema, "session"):
            # Propagate the root schema's session/transient state down to the
            # nested schema before deserializing, so related lookups work.
            nested_schema.session = self.root.session
            nested_schema.transient = self.root.transient
        return super()._deserialize(*args, **kwargs)
| mit | 28e113b1105a7040e3a1876ae8d44ee4 | 36.211268 | 91 | 0.623202 | 4.230584 | false | false | false | false |
pact-foundation/pact-python | pact/matchers.py | 1 | 13955 | """Classes for defining request and response data that is variable."""
import six
import datetime
from enum import Enum
class Matcher(object):
    """Base class for defining complex contract expectations."""

    def generate(self):
        """Return the value the mock service should use for this matcher.

        Subclasses must override this method.

        :rtype: any
        """
        raise NotImplementedError
class EachLike(Matcher):
    """
    Expect the data to be a list of similar objects.

    Example:

    >>> from pact import Consumer, Provider
    >>> pact = Consumer('consumer').has_pact_with(Provider('provider'))
    >>> (pact.given('there are three comments')
    ...  .upon_receiving('a request for the most recent 2 comments')
    ...  .with_request('get', '/comment', query={'limit': 2})
    ...  .will_respond_with(200, body={
    ...    'comments': EachLike(
    ...        {'name': SomethingLike('bob'),
    ...         'text': SomethingLike('Hello!')},
    ...        minimum=2)
    ...  }))

    Would expect the response to be a JSON object with a ``comments`` list of
    at least 2 items, each a `dict` with the keys `name` and `text`.
    """

    def __init__(self, matcher, minimum=1):
        """
        Create a new EachLike.

        :param matcher: The expected value that each item in a list should
            look like; this can be other matchers.
        :type matcher: None, list, dict, int, float, str, unicode, Matcher
        :param minimum: The minimum number of items expected.
            Must be greater than or equal to 1.
        :type minimum: int
        """
        assert minimum >= 1, 'Minimum must be greater than or equal to 1'
        self.matcher = matcher
        self.minimum = minimum

    def generate(self):
        """
        Generate the value the mock service will return.

        :return: A dict describing the list contents and the provided minimum
            number of items for that list.
        :rtype: dict
        """
        generated = {'json_class': 'Pact::ArrayLike'}
        generated['contents'] = from_term(self.matcher)
        generated['min'] = self.minimum
        return generated
class Like(Matcher):
    """
    Expect the type of the value to be the same as matcher.

    Example:

    >>> from pact import Consumer, Provider
    >>> pact = Consumer('consumer').has_pact_with(Provider('provider'))
    >>> (pact
    ...  .given('there is a random number generator')
    ...  .upon_receiving('a request for a random number')
    ...  .with_request('get', '/generate-number')
    ...  .will_respond_with(200, body={
    ...    'number': Like(1111222233334444)
    ...  }))

    Would expect the response body to be a JSON object containing the key
    ``number`` with an integer value. When the consumer runs this contract,
    the mock service returns the literal ``1111222233334444`` instead of a
    randomly generated value.
    """

    def __init__(self, matcher):
        """
        Create a new SomethingLike.

        :param matcher: The object whose *type* should be expected. The mock
            service returns this value verbatim; the provider verification
            asserts only the type.
        :type matcher: None, list, dict, int, float, str, unicode, Matcher
        """
        valid_types = (
            type(None), list, dict, int, float, six.string_types, Matcher)
        assert isinstance(matcher, valid_types), (
            "matcher must be one of '{}', got '{}'".format(
                valid_types, type(matcher)))
        self.matcher = matcher

    def generate(self):
        """
        Return the value that should be used in the request/response.

        :return: A dict describing what the contents of the response should be.
        :rtype: dict
        """
        generated = {'json_class': 'Pact::SomethingLike'}
        generated['contents'] = from_term(self.matcher)
        return generated


# Remove SomethingLike in major version 1.0.0
SomethingLike = Like
class Term(Matcher):
    """
    Expect the response to match a specified regular expression.

    Example:

    >>> from pact import Consumer, Provider
    >>> pact = Consumer('consumer').has_pact_with(Provider('provider'))
    >>> (pact.given('the current user is logged in as `tester`')
    ...  .upon_receiving('a request for the user profile')
    ...  .with_request('get', '/profile')
    ...  .will_respond_with(200, body={
    ...    'name': 'tester',
    ...    'theme': Term('light|dark|legacy', 'dark')
    ...  }))

    Would expect the response body to be a JSON object where ``theme`` matches
    one of: light, dark, or legacy. When the consumer runs this contract, the
    mock service returns ``dark``.
    """

    def __init__(self, matcher, generate):
        """
        Create a new Term.

        :param matcher: A regular expression to find.
        :type matcher: basestring
        :param generate: A value to be returned by the mock service when
            generating the response to the consumer.
        :type generate: basestring
        """
        self.matcher = matcher
        # Stored under a private name because `generate` is also a method.
        self._generate = generate

    def generate(self):
        """
        Return the value that should be used in the request/response.

        :return: A dict describing what the response contents should be and
            what requests should match.
        :rtype: dict
        """
        regexp_part = {
            'json_class': 'Regexp',
            'o': 0,
            's': self.matcher,
        }
        return {
            'json_class': 'Pact::Term',
            'data': {'generate': self._generate, 'matcher': regexp_part},
        }
def from_term(term):
    """
    Parse the provided term into the JSON for the mock service.

    :param term: The term to be parsed.
    :type term: None, list, dict, int, float, str, bytes, unicode, Matcher
    :return: The JSON representation for this term.
    :rtype: dict, list, str
    :raises ValueError: if the term is of an unsupported type.
    """
    if term is None:
        return term
    elif isinstance(term, (six.string_types, six.binary_type, int, float)):
        return term
    elif isinstance(term, dict):
        return {k: from_term(v) for k, v in term.items()}
    elif isinstance(term, list):
        # The index is not needed; just convert each element recursively.
        return [from_term(t) for t in term]
    elif isinstance(term, Matcher):
        # isinstance() is equivalent to the old issubclass(term.__class__, ...)
        # check and also honors virtual subclasses.
        return term.generate()
    else:
        raise ValueError('Unknown type: %s' % type(term))
def get_generated_values(input):
    """
    Resolve (nested) Matchers to their generated values for assertion.

    :param input: The input to be resolved to its generated values.
    :type input: None, list, dict, int, float, bool, str, unicode, Matcher
    :return: The input resolved to its generated value(s)
    :rtype: None, list, dict, int, float, bool, str, unicode, Matcher
    :raises ValueError: if the input is of an unsupported type.
    """
    # NOTE: the parameter keeps the name `input` (shadowing the builtin) for
    # backward compatibility with existing keyword-argument callers.
    if input is None:
        return input
    if isinstance(input, (six.string_types, int, float, bool)):
        return input
    if isinstance(input, dict):
        return {k: get_generated_values(v) for k, v in input.items()}
    if isinstance(input, list):
        # The index is not needed; resolve each element recursively.
        return [get_generated_values(t) for t in input]
    elif isinstance(input, Like):
        return get_generated_values(input.matcher)
    elif isinstance(input, EachLike):
        # EachLike generates `minimum` copies of its matcher's value.
        return [get_generated_values(input.matcher)] * input.minimum
    elif isinstance(input, Term):
        return input.generate()['data']['generate']
    else:
        raise ValueError('Unknown type: %s' % type(input))
class Format:
    """
    Class of regular expressions for common formats.

    Example:

    >>> from pact import Consumer, Provider
    >>> from pact.matchers import Format
    >>> pact = Consumer('consumer').has_pact_with(Provider('provider'))
    >>> (pact.given('the current user is logged in as `tester`')
    ...  .upon_receiving('a request for the user profile')
    ...  .with_request('get', '/profile')
    ...  .will_respond_with(200, body={
    ...    'id': Format().identifier,
    ...    'lastUpdated': Format().time
    ...  }))

    Would expect `id` to be any valid int and `lastUpdated` to be a valid time.

    When the consumer runs this contract, the value that will be returned is
    the second value passed to Term in the given function; for the time
    example it would be datetime.datetime(2000, 2, 1, 12, 30, 0, 0).time()
    """

    def __init__(self):
        """Create a new Formatter."""
        # NOTE: each assignment below *replaces* the bound method of the same
        # name with its generated matcher, so on an instance e.g. `self.time`
        # is a Term/Like object, not a callable. The methods remain available
        # on the class itself.
        self.identifier = self.integer_or_identifier()
        self.integer = self.integer_or_identifier()
        self.decimal = self.decimal()
        self.ip_address = self.ip_address()
        self.hexadecimal = self.hexadecimal()
        self.ipv6_address = self.ipv6_address()
        self.uuid = self.uuid()
        self.timestamp = self.timestamp()
        self.date = self.date()
        self.time = self.time()

    def integer_or_identifier(self):
        """
        Match any integer.

        :return: a Like object with an integer.
        :rtype: Like
        """
        return Like(1)

    def decimal(self):
        """
        Match any decimal.

        :return: a Like object with a decimal.
        :rtype: Like
        """
        return Like(1.0)

    def ip_address(self):
        """
        Match any ip address.

        :return: a Term object with an ip address regex.
        :rtype: Term
        """
        return Term(self.Regexes.ip_address.value, '127.0.0.1')

    def hexadecimal(self):
        """
        Match any hexadecimal.

        :return: a Term object with a hexdecimal regex.
        :rtype: Term
        """
        return Term(self.Regexes.hexadecimal.value, '3F')

    def ipv6_address(self):
        """
        Match any ipv6 address.

        :return: a Term object with an ipv6 address regex.
        :rtype: Term
        """
        return Term(self.Regexes.ipv6_address.value, '::ffff:192.0.2.128')

    def uuid(self):
        """
        Match any uuid.

        :return: a Term object with a uuid regex.
        :rtype: Term
        """
        return Term(
            self.Regexes.uuid.value, 'fc763eba-0905-41c5-a27f-3934ab26786c'
        )

    def timestamp(self):
        """
        Match any timestamp.

        :return: a Term object with a timestamp regex.
        :rtype: Term
        """
        return Term(
            self.Regexes.timestamp.value, datetime.datetime(
                2000, 2, 1, 12, 30, 0, 0
            ).isoformat()
        )

    def date(self):
        """
        Match any date.

        :return: a Term object with a date regex.
        :rtype: Term
        """
        return Term(
            self.Regexes.date.value, datetime.datetime(
                2000, 2, 1, 12, 30, 0, 0
            ).date().isoformat()
        )

    def time(self):
        """
        Match any time.

        :return: a Term object with a time regex.
        :rtype: Term
        """
        return Term(
            self.Regexes.time_regex.value, datetime.datetime(
                2000, 2, 1, 12, 30, 0, 0
            ).time().isoformat()
        )

    class Regexes(Enum):
        """Regex Enum for common formats."""

        ip_address = r'(\d{1,3}\.)+\d{1,3}'
        hexadecimal = r'[0-9a-fA-F]+'
        # Accepts full, abbreviated (::) and IPv4-mapped IPv6 forms.
        ipv6_address = r'(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,6}\Z)|' \
            r'(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,5}\Z)|(\A([0-9a-f]' \
            r'{1,4}:){1,3}(:[0-9a-f]{1,4}){1,4}\Z)|(\A([0-9a-f]{1,4}:)' \
            r'{1,4}(:[0-9a-f]{1,4}){1,3}\Z)|(\A([0-9a-f]{1,4}:){1,5}(:[0-' \
            r'9a-f]{1,4}){1,2}\Z)|(\A([0-9a-f]{1,4}:){1,6}(:[0-9a-f]{1,4})' \
            r'{1,1}\Z)|(\A(([0-9a-f]{1,4}:){1,7}|:):\Z)|(\A:(:[0-9a-f]{1,4})' \
            r'{1,7}\Z)|(\A((([0-9a-f]{1,4}:){6})(25[0-5]|2[0-4]\d|[0-1]' \
            r'?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|(\A(([0-9a-f]' \
            r'{1,4}:){5}[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25' \
            r'[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|(\A([0-9a-f]{1,4}:){5}:[' \
            r'0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4' \
            r']\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]' \
            r'{1,4}){1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]' \
            r'\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}' \
            r'){1,3}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0' \
            r'-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,' \
            r'2}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]' \
            r'?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,1}:' \
            r'(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?' \
            r'\d)){3}\Z)|(\A(([0-9a-f]{1,4}:){1,5}|:):(25[0-5]|2[0-4]\d|[0' \
            r'-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A:(:[' \
            r'0-9a-f]{1,4}){1,5}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]' \
            r'|2[0-4]\d|[0-1]?\d?\d)){3}\Z)'
        uuid = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
        # ISO-8601 timestamp (date, optional time, optional zone offset).
        timestamp = r'^([\+-]?\d{4}(?!\d{2}\b))((-?)((0[1-9]|1[0-2])(\3(' \
            r'[12]\d|0[1-9]|3[01]))?|W([0-4]\d|5[0-2])(-?[1-7])?|(00[1-' \
            r'9]|0[1-9]\d|[12]\d{2}|3([0-5]\d|6[1-6])))([T\s]((([01]\d|2' \
            r'[0-3])((:?)[0-5]\d)?|24\:?00)([\.,]\d+(?!:))?)?(\17[0-5]\d' \
            r'([\.,]\d+)?)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?)?)?$'
        # ISO-8601 calendar/week/ordinal date.
        date = r'^([\+-]?\d{4}(?!\d{2}\b))((-?)((0[1-9]|1[0-2])(\3([12]\d|' \
            r'0[1-9]|3[01]))?|W([0-4]\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\d|' \
            r'[12]\d{2}|3([0-5]\d|6[1-6])))?)'
        time_regex = r'^(T\d\d:\d\d(:\d\d)?(\.\d+)?(([+-]\d\d:\d\d)|Z)?)?$'
| mit | 09bcc2e7984ac912462d5bdd71f5cdbb | 33.8875 | 79 | 0.530992 | 3.210999 | false | false | false | false |
pact-foundation/pact-python | examples/message/tests/provider/test_message_provider.py | 1 | 2083 | import pytest
from pact import MessageProvider
# Connection/credential defaults for the local Pact Broker used by these tests.
PACT_BROKER_URL = "http://localhost"
PACT_BROKER_USERNAME = "pactbroker"
PACT_BROKER_PASSWORD = "pactbroker"
# Directory where the consumer-generated pact files are read from.
PACT_DIR = "pacts"
@pytest.fixture
def default_opts():
    """Default keyword arguments for MessageProvider.verify_with_broker."""
    opts = {
        'broker_username': PACT_BROKER_USERNAME,
        'broker_password': PACT_BROKER_PASSWORD,
        'broker_url': PACT_BROKER_URL,
        'publish_version': '3',
        'publish_verification_results': False,
    }
    return opts
def document_created_handler():
    """Provider handler: the message emitted for a successful document creation."""
    return dict(
        event="ObjectCreated:Put",
        documentName="document.doc",
        creator="TP",
        documentType="microsoft-word",
    )
def document_deleted_handler():
    """Provider handler: the message emitted for a successful document deletion."""
    # NOTE(review): the event name "ObjectCreated:Delete" looks odd for a
    # deletion (S3 uses "ObjectRemoved:Delete"); it must stay as-is to match
    # the recorded pact, but worth confirming against the consumer contract.
    return dict(
        event="ObjectCreated:Delete",
        documentName="document.doc",
        creator="TP",
        documentType="microsoft-word",
    )
def test_verify_success():
    """Verification succeeds when every pacted message has a provider handler."""
    provider = MessageProvider(
        message_providers={
            'A document created successfully': document_created_handler,
            'A document deleted successfully': document_deleted_handler,
        },
        provider='ContentProvider',
        consumer='DetectContentLambda',
        # Use the module-level constant instead of re-hardcoding 'pacts'.
        pact_dir=PACT_DIR,
    )
    with provider:
        provider.verify()
def test_verify_failure_when_a_provider_missing():
    """Verification fails when a pacted message has no matching handler."""
    provider = MessageProvider(
        message_providers={
            # The 'deleted' handler is deliberately omitted.
            'A document created successfully': document_created_handler,
        },
        provider='ContentProvider',
        consumer='DetectContentLambda',
        # Use the module-level constant instead of re-hardcoding 'pacts'.
        pact_dir=PACT_DIR,
    )
    with pytest.raises(AssertionError):
        with provider:
            provider.verify()
def test_verify_from_broker(default_opts):
    """Verification succeeds when pacts are fetched from the broker."""
    provider = MessageProvider(
        message_providers={
            'A document created successfully': document_created_handler,
            'A document deleted successfully': document_deleted_handler,
        },
        provider='ContentProvider',
        consumer='DetectContentLambda',
        # Use the module-level constant instead of re-hardcoding 'pacts'.
        pact_dir=PACT_DIR,
    )
    with provider:
        provider.verify_with_broker(**default_opts)
| mit | 9f44c00980b66ddf04cf0b1f92b0d54c | 24.096386 | 72 | 0.62266 | 4.166 | false | true | false | false |
warner/foolscap | src/foolscap/tokens.py | 1 | 17073 | import six
from twisted.python.failure import Failure
from zope.interface import Attribute, Interface
# delimiter characters: the type byte that terminates each banana token header.
LIST = six.int2byte(0x80) # old
INT = six.int2byte(0x81)
STRING = six.int2byte(0x82)
NEG = six.int2byte(0x83)
FLOAT = six.int2byte(0x84)
# "optional" -- these might be refused by a low-level implementation.
LONGINT = six.int2byte(0x85) # old
LONGNEG = six.int2byte(0x86) # old
# really optional; this is part of the 'pb' vocabulary
VOCAB = six.int2byte(0x87)
# newbanana tokens (0x8B and 0x8C are unassigned here)
OPEN = six.int2byte(0x88)
CLOSE = six.int2byte(0x89)
ABORT = six.int2byte(0x8A)
ERROR = six.int2byte(0x8D)
PING = six.int2byte(0x8E)
PONG = six.int2byte(0x8F)

# Human-readable names for the type bytes, for logs and error messages.
tokenNames = {
    LIST: "LIST",
    INT: "INT",
    STRING: "STRING",
    NEG: "NEG",
    FLOAT: "FLOAT",
    LONGINT: "LONGINT",
    LONGNEG: "LONGNEG",
    VOCAB: "VOCAB",
    OPEN: "OPEN",
    CLOSE: "CLOSE",
    ABORT: "ABORT",
    ERROR: "ERROR",
    PING: "PING",
    PONG: "PONG",
    }

SIZE_LIMIT = 1000 # default limit on the body length of long tokens (STRING,
                  # LONGINT, LONGNEG, ERROR)
class InvalidRemoteInterface(Exception):
    """Raised for a rejected RemoteInterface definition.

    (Purpose inferred from the name -- the raising code is outside this file.)
    """
    pass

class UnknownSchemaType(Exception):
    """Raised for an unrecognized schema/constraint specification.

    (Purpose inferred from the name -- the raising code is outside this file.)
    """
    pass
class Violation(Exception):
    """Raised in response to a schema violation: the incoming token stream
    has violated a constraint imposed by the recipient. The current Unslicer
    is abandoned and the error is propagated upwards to the enclosing
    Unslicer parent by providing a BananaFailure object to the parent's
    .receiveChild method. All remaining tokens for the current Unslicer are
    to be dropped.
    """

    # '.where' describes which node of the object graph was being handled
    # when the exception took place.
    where = ""

    def setLocation(self, where):
        self.where = where

    def getLocation(self):
        return self.where

    def prependLocation(self, prefix):
        self.where = (prefix + " " + self.where) if self.where else prefix

    def appendLocation(self, suffix):
        self.where = (self.where + " " + suffix) if self.where else suffix

    def __str__(self):
        if not self.where:
            return "Violation: %s" % (self.args,)
        return "Violation (%s): %s" % (self.where, self.args)
class RemoteException(Exception):
    """Raised in response to any remote exception when the Tub runs in
    expose-remote-exception-types=False mode. Wraps a CopiedFailure, which
    callers can examine via .failure to learn more than the bare fact that
    something failed on the remote end."""

    def __init__(self, failure):
        # The wrapped CopiedFailure describing the remote error.
        self.failure = failure

    def __str__(self):
        return "<RemoteException around '%s'>" % (str(self.failure),)
class BananaError(Exception):
    """Raised in response to a fundamental protocol violation. The
    connection should be dropped immediately.

    .where is an optional string naming the node of the object graph where
    the failure was noticed.
    """

    where = None

    def __str__(self):
        if not self.where:
            return "BananaError: %s" % (self.args,)
        return "BananaError(in %s): %s" % (self.where, self.args)
class NegotiationError(Exception):
    """Base class for local connection-negotiation failures."""
    pass

class DuplicateConnection(NegotiationError):
    """Negotiation found a duplicate connection.

    (Purpose inferred from the name -- the raising code is outside this file.)
    """
    pass

class RemoteNegotiationError(Exception):
    """The other end hung up on us because they had a NegotiationError on
    their side."""
    pass

class PBError(Exception):
    """Generic foolscap/PB error base class."""
    pass

class BananaFailure(Failure):
    """This is a marker subclass of Failure, to let Unslicer.receiveChild
    distinguish between an unserialized Failure instance and a failure in
    a child Unslicer"""
    pass

class WrongTubIdError(Exception):
    """getReference(furlFile=) used a FURL with a different TubID"""

class WrongNameError(Exception):
    """getReference(furlFile=) used a FURL with a different name"""

class NoLocationError(Exception):
    """This Tub has no location set, so we cannot make references to it."""

class NoLocationHintsError(Exception):
    """We cannot make a connection without some location hints"""
class ISlicer(Interface):
    """I know how to slice objects into tokens."""

    sendOpen = Attribute(\
"""True if an OPEN/CLOSE token pair should be sent around the Slicer's body
tokens. Only special-purpose Slicers (like the RootSlicer) should use False.
""")

    trackReferences = Attribute(\
"""True if the object we slice is referenceable: i.e. it is useful or
necessary to send multiple copies as a single instance and a bunch of
References, rather than as separate copies. Instances are referenceable, as
are mutable containers like lists.""")

    streamable = Attribute(\
"""True if children of this object are allowed to use Deferreds to stall
production of new tokens. This must be set in slice() before yielding each
child object, and affects that child and all descendants. Streaming is only
allowed if the parent also allows streaming: if slice() is called with
streamable=False, then self.streamable must be False too. It can be changed
from within the slice() generator at any time as long as this restriction is
obeyed.

This attribute is read when each child Slicer is started.""")

    def slice(streamable, banana):
        """Return an iterator which provides Index Tokens and the Body
        Tokens of the object's serialized form. This is frequently
        implemented with a generator (i.e. 'yield' appears in the body of
        this function). Do not yield the OPEN or the CLOSE token, those will
        be handled elsewhere.

        If a Violation exception is raised, slicing will cease. An ABORT
        token followed by a CLOSE token will be emitted.

        If 'streamable' is True, the iterator may yield a Deferred to
        indicate that slicing should wait until the Deferred is fired. If
        the Deferred is errbacked, the connection will be dropped. TODO: it
        should be possible to errback with a Violation."""

    def registerRefID(refid, obj):
        """Register the relationship between 'refid' (a number taken from
        the cumulative count of OPEN tokens sent over our connection: 0 is
        the object described by the very first OPEN sent over the wire) and
        the object. If the object is sent a second time, a Reference may be
        used in its place.

        Slicers usually delegate this function upwards to the RootSlicer, but
        it can be handled at any level to allow local scoping of references
        (they might only be valid within a single RPC invocation, for
        example).

        This method is *not* allowed to raise a Violation, as that will mess
        up the transmit logic. If it raises any other exception, the
        connection will be dropped."""

    def childAborted(f):
        """Notify the Slicer that one of its child slicers (as produced by
        its .slice iterator) has caused an error. If the slicer got started,
        it has now emitted an ABORT token and terminated its token stream.
        If it did not get started (usually because the child object was
        unserializable), there has not yet been any trace of the object in
        the token stream.

        The corresponding Unslicer (receiving this token stream) will get an
        BananaFailure and is likely to ignore any remaining tokens from us,
        so it may be reasonable for the parent Slicer to give up as well.

        If the Slicer wishes to abandon their own sequence, it should simply
        return the failure object passed in. If it wants to absorb the
        error, it should return None."""

    def slicerForObject(obj):
        """Get a new Slicer for some child object. Slicers usually delegate
        this method up to the RootSlicer. References are handled by
        producing a ReferenceSlicer here. These references can have various
        scopes.

        If something on the stack does not want the object to be sent, it can
        raise a Violation exception. This is the 'taster' function."""

    def describe():
        """Return a short string describing where in the object tree this
        slicer is sitting, relative to its parent. These strings are
        obtained from every slicer in the stack, and joined to describe
        where any problems occurred."""
class IRootSlicer(Interface):
    """Interface for the top-most Slicer on a Banana sending stack."""

    def allowStreaming(streamable):
        """Specify whether or not child Slicers will be allowed to stream."""

    def connectionLost(why):
        """Called when the transport is closed. The RootSlicer may choose to
        abandon objects being sent here."""
class IUnslicer(Interface):
# .parent
# start/receiveChild/receiveClose/finish are
# the main "here are some tokens, make an object out of them" entry
# points used by Unbanana.
# start/receiveChild can call self.protocol.abandonUnslicer(failure,
# self) to tell the protocol that the unslicer has given up on life and
# all its remaining tokens should be discarded. The failure will be
# given to the late unslicer's parent in lieu of the object normally
# returned by receiveClose.
# start/receiveChild/receiveClose/finish may raise a Violation
# exception, which tells the protocol that this object is contaminated
# and should be abandoned. An BananaFailure will be passed to its
# parent.
# Note, however, that it is not valid to both call abandonUnslicer *and*
# raise a Violation. That would discard too much.
def setConstraint(constraint):
"""Add a constraint for this unslicer. The unslicer will enforce
this constraint upon all incoming data. The constraint must be of an
appropriate type (a ListUnslicer will only accept a ListConstraint,
etc.). It must not be None. To leave us unconstrained, do not call
this method.
If this method is not called, the Unslicer will accept any valid
banana as input, which probably means there is no limit on the
number of bytes it will accept (and therefore on the memory it could
be made to consume) before it finally accepts or rejects the input.
"""
def start(count):
"""Called to initialize the new slice. The 'count' argument is the
reference id: if this object might be shared (and therefore the
target of a 'reference' token), it should call
self.protocol.setObject(count, obj) with the object being created.
If this object is not available yet (tuples), it should save a
Deferred there instead.
"""
def checkToken(typebyte, size):
"""Check to see if the given token is acceptable (does it conform to
the constraint?). It will not be asked about ABORT or CLOSE tokens,
but it *will* be asked about OPEN. It should enfore a length limit
for long tokens (STRING and LONGINT/LONGNEG types). If STRING is
acceptable, then VOCAB should be too. It should return None if the
token and the size are acceptable. Should raise Violation if the
schema indiates the token is not acceptable. Should raise
BananaError if the type byte violates the basic Banana protocol. (if
no schema is in effect, this should never raise Violation, but might
still raise BananaError).
"""
def openerCheckToken(typebyte, size, opentype):
"""'typebyte' is the type of an incoming index token. 'size' is the
value of header associated with this typebyte. 'opentype' is a list
of open tokens that we've received so far, not including the one
that this token hopes to create.
This method should ask the current opener if this index token is
acceptable, and is used in lieu of checkToken() when the receiver is
in the index phase. Usually implemented by calling
self.opener.openerCheckToken, thus delegating the question to the
RootUnslicer.
"""
def doOpen(opentype):
"""opentype is a tuple. Return None if more index tokens are
required. Check to see if this kind of child object conforms to the
constraint, raise Violation if not. Create a new Unslicer (usually
by delegating to self.parent.doOpen, up to the RootUnslicer). Set a
constraint on the child unslicer, if any.
"""
def receiveChild(childobject,
ready_deferred):
"""'childobject' is being handed to this unslicer. It may be a
primitive type (number or string), or a composite type produced by
another Unslicer. It might also be a Deferred, which indicates that
the actual object is not ready (perhaps a tuple with an element that
is not yet referenceable), in which case you should add a callback
to it that will fill in the appropriate object later. This callback
is required to return the object when it is done, so multiple such
callbacks can be chained. The childobject/ready_deferred argument
pair is taken directly from the output of receiveClose(). If
ready_deferred is non-None, you should return a dependent Deferred
from your own receiveClose method."""
def reportViolation(bf):
"""You have received an error instead of a child object. If you wish
to give up and propagate the error upwards, return the BananaFailure
object you were just given. To absorb the error and keep going with
your sequence, return None."""
def receiveClose():
"""Called when the Close token is received. Returns a tuple of
(object/referenceable-deferred, complete-deferred), or an
BananaFailure if something went wrong. There are four potential
cases::
(obj, None): the object is complete and ready to go
(d1, None): the object cannot be referenced yet, probably
because it is an immutable container, and one of its
children cannot be referenced yet. The deferred will
fire by the time the cycle has been fully deserialized,
with the object as its argument.
(obj, d2): the object can be referenced, but it is not yet
complete, probably because some component of it is
'slow' (see below). The Deferred will fire (with an
argument of None) when the object is ready to be used.
It is not guaranteed to fire by the time the enclosing
top-level object has finished deserializing.
(d1, d2): the object cannot yet be referenced, and even if it could
be, it would not yet be ready for use. Any potential users
should wait until both deferreds fire before using it.
The first deferred (d1) is guaranteed to fire before the top-most
enclosing object (a CallUnslicer, for PB methods) is closed. (if it
does not fire, that indicates a broken cycle). It is present to
handle cycles that include immutable containers, like tuples.
Mutable containers *must* return a reference to an object (even if
it is not yet ready to be used, because it contains placeholders to
tuples that have not yet been created), otherwise those cycles
cannot be broken and the object graph will not reconstructable.
The second (d2) has no such guarantees about when it will fire. It
indicates a dependence upon 'slow' external events. The first use
case for such 'slow' objects is a globally-referenceable object
which requires a new Broker connection before it can be used, so the
Deferred will not fire until a TCP connection has been established
and the first stages of PB negotiation have been completed.
If necessary, unbanana.setObject should be called, then the Deferred
created in start() should be fired with the new object."""
def finish():
"""Called when the unslicer is popped off the stack. This is called
even if the pop is because of an exception. The unslicer should
perform cleanup, including firing the Deferred with an
BananaFailure if the object it is creating could not be created.
TODO: can receiveClose and finish be merged? Or should the child
object be returned from finish() instead of receiveClose?
"""
def describe():
"""Return a short string describing where in the object tree this
unslicer is sitting, relative to its parent. These strings are
obtained from every unslicer in the stack, and joined to describe
where any problems occurred."""
    def where():
        """Return a string that describes the location of this
        unslicer, starting at the root of the object tree."""
| mit | 21d33de835dc67749ebc96a5697a7b81 | 42.776923 | 77 | 0.68295 | 4.388946 | false | false | false | false |
warner/foolscap | src/foolscap/slicers/tuple.py | 1 | 4685 | # -*- test-case-name: foolscap.test.test_banana -*-
from __future__ import print_function
from twisted.internet.defer import Deferred
from foolscap.tokens import Violation
from foolscap.slicer import BaseUnslicer
from foolscap.slicers.list import ListSlicer
from foolscap.constraint import OpenerConstraint, Any, IConstraint
from foolscap.util import AsyncAND
class TupleSlicer(ListSlicer):
    # Serializes tuples exactly like lists, but under the ("tuple",)
    # opentype so the receiving side rebuilds an immutable tuple.
    opentype = ("tuple",)
    slices = tuple
class TupleUnslicer(BaseUnslicer):
    """Deserializes a ("tuple",) sequence into a Python tuple.

    Tuples are immutable, so the real object cannot be built until every
    child is available. Children that arrive as Deferreds (because they are
    part of a not-yet-broken reference cycle) are stored as "placeholder"
    entries; other objects that refer to this tuple are handed a Deferred
    (registered via protocol.setObject) which fires once the finished tuple
    exists.
    """
    opentype = ("tuple",)
    debug = False
    constraints = None

    def setConstraint(self, constraint):
        # Any() means "unconstrained": leave self.constraints as None.
        if isinstance(constraint, Any):
            return
        assert isinstance(constraint, TupleConstraint)
        self.constraints = constraint.constraints

    def start(self, count):
        self.list = []
        # indices of .list which are unfilled because of children that could
        # not yet be referenced
        self.num_unreferenceable_children = 0
        self.count = count
        if self.debug:
            print("%s[%d].start with %s" % (self, self.count, self.list))
        self.finished = False
        # Until the tuple is complete, other referrers get this Deferred.
        self.deferred = Deferred()
        self.protocol.setObject(count, self.deferred)
        self._ready_deferreds = []

    def checkToken(self, typebyte, size):
        # identity comparison: constraints is either None or a list
        if self.constraints is None:
            return
        if len(self.list) >= len(self.constraints):
            raise Violation("the tuple is full")
        self.constraints[len(self.list)].checkToken(typebyte, size)

    def doOpen(self, opentype):
        where = len(self.list)
        if self.constraints is not None:
            if where >= len(self.constraints):
                raise Violation("the tuple is full")
            self.constraints[where].checkOpentype(opentype)
        unslicer = self.open(opentype)
        if unslicer:
            if self.constraints is not None:
                unslicer.setConstraint(self.constraints[where])
        return unslicer

    def update(self, obj, index):
        # Callback fired when a placeholder child finally becomes available.
        if self.debug:
            print("%s[%d].update: [%d]=%s" % (self, self.count, index, obj))
        self.list[index] = obj
        self.num_unreferenceable_children -= 1
        if self.finished:
            self.checkComplete()
        return obj

    def receiveChild(self, obj, ready_deferred=None):
        if ready_deferred:
            self._ready_deferreds.append(ready_deferred)
        if isinstance(obj, Deferred):
            obj.addCallback(self.update, len(self.list))
            obj.addErrback(self.explode)
            self.num_unreferenceable_children += 1
            self.list.append("placeholder")
        else:
            self.list.append(obj)

    def checkComplete(self):
        if self.debug:
            print("%s[%d].checkComplete: %d pending" %
                  (self, self.count, self.num_unreferenceable_children))
        if self.num_unreferenceable_children:
            # not finished yet, we'll fire our Deferred when we are
            if self.debug:
                print(" not finished yet")
            return
        # list is now complete. We can finish.
        return self.complete()

    def complete(self):
        ready_deferred = None
        if self._ready_deferreds:
            ready_deferred = AsyncAND(self._ready_deferreds)
        t = tuple(self.list)
        if self.debug:
            print(" finished! tuple:%s{%s}" % (t, id(t)))
        # Replace the placeholder Deferred with the real tuple, then fire it
        # for anyone who is already waiting on it.
        self.protocol.setObject(self.count, t)
        self.deferred.callback(t)
        return t, ready_deferred

    def receiveClose(self):
        if self.debug:
            print("%s[%d].receiveClose" % (self, self.count))
        self.finished = True
        if self.num_unreferenceable_children:
            # not finished yet, we'll fire our Deferred when we are
            if self.debug:
                print(" not finished yet")
            ready_deferred = None
            if self._ready_deferreds:
                ready_deferred = AsyncAND(self._ready_deferreds)
            return self.deferred, ready_deferred
        # the list is already complete
        return self.complete()

    def describe(self):
        return "[%d]" % len(self.list)
class TupleConstraint(OpenerConstraint):
    """Constrains a value to be a tuple of fixed length, with one constraint
    applied positionally to each element."""
    opentypes = [("tuple",)]
    name = "TupleConstraint"

    def __init__(self, *elemConstraints):
        # adapt each element spec (class, schema, ...) to IConstraint
        self.constraints = [IConstraint(e) for e in elemConstraints]

    def checkObject(self, obj, inbound):
        if not isinstance(obj, tuple):
            raise Violation("not a tuple")
        if len(obj) != len(self.constraints):
            raise Violation("wrong size tuple")
        # pair each element with its positional constraint
        for constraint, element in zip(self.constraints, obj):
            constraint.checkObject(element, inbound)
| mit | 876c4b51d32d7061fb72c935dc9b5769 | 32.949275 | 76 | 0.60555 | 4.091703 | false | false | false | false |
warner/foolscap | src/foolscap/test/test_crypto.py | 1 | 4158 |
import re
from twisted.trial import unittest
from zope.interface import implementer
from twisted.internet import defer
from foolscap import pb
from foolscap.api import RemoteInterface, Referenceable, Tub, flushEventualQueue
from foolscap.remoteinterface import RemoteMethodSchema
from foolscap.util import allocate_tcp_port
class RIMyCryptoTarget(RemoteInterface):
    """Remote interface exercised by these tests; demonstrates both ways of
    declaring a method schema."""
    # method constraints can be declared directly:
    add1 = RemoteMethodSchema(_response=int, a=int, b=int)
    # or through their function definitions:
    def add(a=int, b=int): return int
    #add = schema.callable(add) # the metaclass makes this unnecessary
    # but it could be used for adding options or something
    def join(a=str, b=str, c=int): return str
    def getName(): return str
@implementer(RIMyCryptoTarget)
class Target(Referenceable):
    """Server-side object used by the tests; records remote_add calls so a
    test can check which instance was actually reached."""
    def __init__(self, name=None):
        self.calls = []  # (a, b) tuples recorded by remote_add/remote_add1
        self.name = name
    def getMethodSchema(self, methodname):
        return None
    def remote_add(self, a, b):
        self.calls.append((a,b))
        return a+b
    remote_add1 = remote_add
    def remote_getName(self):
        return self.name
    def remote_disputed(self, a):
        # returns a fixed value; presumably used to provoke schema disputes
        # in tests -- verify against the callers
        return 24
    def remote_fail(self):
        raise ValueError("you asked me to fail")
class UsefulMixin:
    """Test mixin: starts num_services Tubs in setUp and stops them all (and
    flushes foolscap's eventual-send queue) in tearDown."""
    num_services = 2
    def setUp(self):
        self.services = []
        for i in range(self.num_services):
            s = Tub()
            s.startService()
            self.services.append(s)
    def tearDown(self):
        d = defer.DeferredList([s.stopService() for s in self.services])
        d.addCallback(self._tearDown_1)
        return d
    def _tearDown_1(self, res):
        # let any pending eventual-sends run before the test finishes
        return flushEventualQueue()
class TestPersist(UsefulMixin, unittest.TestCase):
    """Checks that a Tub recreated from the same certificate data can serve
    the same registered name, so old FURLs (with a patched port) still work."""
    num_services = 2
    def testPersist(self):
        t1 = Target()
        s1,s2 = self.services
        port = allocate_tcp_port()
        s1.listenOn("tcp:%d:interface=127.0.0.1" % port)
        s1.setLocation("127.0.0.1:%d" % port)
        public_url = s1.registerReference(t1, "name")
        self.assertTrue(public_url.startswith("pb:"))
        d = defer.maybeDeferred(s1.stopService)
        d.addCallback(self._testPersist_1, s1, s2, t1, public_url, port)
        return d
    testPersist.timeout = 5
    def _testPersist_1(self, res, s1, s2, t1, public_url, port):
        self.services.remove(s1)
        # rebuild a Tub with the *same* certificate, on a new port
        s3 = Tub(certData=s1.getCertData())
        s3.startService()
        self.services.append(s3)
        t2 = Target()
        newport = allocate_tcp_port()
        s3.listenOn("tcp:%d:interface=127.0.0.1" % newport)
        s3.setLocation("127.0.0.1:%d" % newport)
        s3.registerReference(t2, "name")
        # now patch the URL to replace the port number
        newurl = re.sub(":%d/" % port, ":%d/" % newport, public_url)
        d = s2.getReference(newurl)
        d.addCallback(lambda rr: rr.callRemote("add", a=1, b=2))
        d.addCallback(self.assertEqual, 3)
        d.addCallback(self._testPersist_2, t1, t2)
        return d
    def _testPersist_2(self, res, t1, t2):
        # the call must have reached the new target, not the old one
        self.assertEqual(t1.calls, [])
        self.assertEqual(t2.calls, [(1,2)])
class TestListeners(UsefulMixin, unittest.TestCase):
    """Exercises Tub.listenOn/stopListeningOn and getListeners bookkeeping."""
    num_services = 3
    def testListenOn(self):
        s1 = self.services[0]
        l = s1.listenOn("tcp:%d:interface=127.0.0.1" % allocate_tcp_port())
        self.assertTrue(isinstance(l, pb.Listener))
        self.assertEqual(len(s1.getListeners()), 1)
        s1.stopListeningOn(l)
        self.assertEqual(len(s1.getListeners()), 0)
    def testGetPort1(self):
        s1,s2,s3 = self.services
        s1.listenOn("tcp:%d:interface=127.0.0.1" % allocate_tcp_port())
        listeners = s1.getListeners()
        self.assertEqual(len(listeners), 1)
    def testGetPort2(self):
        s1,s2,s3 = self.services
        s1.listenOn("tcp:%d:interface=127.0.0.1" % allocate_tcp_port())
        listeners = s1.getListeners()
        self.assertEqual(len(listeners), 1)
        # listen on a second port too
        s1.listenOn("tcp:%d:interface=127.0.0.1" % allocate_tcp_port())
        l2 = s1.getListeners()
        self.assertEqual(len(l2), 2)
| mit | 765f56612f46b5c337745f126c4171d6 | 34.237288 | 80 | 0.632035 | 3.315789 | false | true | false | false |
warner/foolscap | src/foolscap/logging/interfaces.py | 1 | 6341 |
from zope.interface import Interface
from foolscap.remoteinterface import RemoteInterface
from foolscap.schema import DictOf, ListOf, Any, Optional, ChoiceOf
# Loose constraint aliases shared by the remote interfaces below.
TubID = Any() # printable, base32 encoded
Incarnation = (Any(), ChoiceOf(Any(), None))
Header = DictOf(Any(), Any())
Event = DictOf(Any(), Any()) # this has message:, level:, facility:, etc
EventWrapper = DictOf(Any(), Any()) # this has from:, rx_time:, and d:
class RILogObserver(RemoteInterface):
    """Remote interface implemented by anything that wants to receive log
    events (and, optionally, incident announcements) from a publisher."""
    __remote_name__ = "RILogObserver.foolscap.lothar.com"
    def msg(logmsg=Event):
        return None
    def done():
        return None
    def new_incident(name=Any(), trigger=Event):
        # should this give (tubid, incarnation, trigger) like list_incidents?
        return None
    def done_with_incident_catchup():
        return None
class RILogFile(RemoteInterface):
    """Remote handle on a single stored logfile."""
    __remote_name__ = "RILogFile.foolscap.lothar.com"
    def get_header():
        # (tubid, incarnation,
        #  (first_event: number, time), (last_event: number, time),
        #  num_events,
        #  level_map, # maps string severity to count of messages
        # )
        return (TubID, int, (int, int), (int, int), int, DictOf(Any(), int))
    def get_events(receiver=RILogObserver):
        """The designated receiver will be sent every event in the logfile,
        followed by a done() call."""
        return None
class RISubscription(RemoteInterface):
    """Handle returned by the subscribe_* methods of RILogPublisher."""
    __remote_name__ = "RISubscription.foolscap.lothar.com"
    def unsubscribe():
        """Cancel a subscription. Once this method has been completed (and
        its Deferred has fired), no further messages will be received by the
        observer (i.e. the response to unsubscribe() will wait until all
        pending messages have been queued).
        This method is idempotent: calling it multiple times has the same
        effect as calling it just once."""
        return None
class RILogPublisher(RemoteInterface):
    """Remote interface offered by an application's logport: lets observers
    subscribe to live events, enumerate stored logfiles, and fetch or
    subscribe to Incident reports."""
    __remote_name__ = "RILogPublisher.foolscap.lothar.com"
    def get_versions():
        return DictOf(Any(), Any())
    def get_pid():
        return int
    def subscribe_to_all(observer=RILogObserver,
                         catch_up=Optional(bool, False)):
        """
        Call unsubscribe() on the returned RISubscription object to stop
        receiving messages.
        """
        return RISubscription
    def unsubscribe(subscription=Any()):
        # NOTE: this is deprecated. Use subscription.unsubscribe() instead.
        # I don't know how to get the constraint right: unsubscribe() should
        # accept return value of subscribe_to_all()
        return None
    def enumerate_logfiles():
        return ListOf(RILogFile)
    # Incident support
    def list_incidents(since=Optional(Any(), "")):
        """Return a dict that maps an 'incident name' (a string of the form
        'incident-TIMESTAMP-UNIQUE') to the triggering event (a single event
        dictionary). The incident name can be passed to get_incident() to
        obtain the list of events (including header) contained inside the
        incident report. Incident names will sort in chronological order.
        If the optional since= argument is provided, then this will only
        return incident names that are alphabetically greater (and thus
        chronologically later) than the given string. This can be used to
        poll an application for incidents that have occurred since a previous
        query. For real-time reporting, use subscribe_to_incidents() instead.
        """
        return DictOf(Any(), Event)
    def subscribe_to_incidents(observer=RILogObserver,
                               catch_up=Optional(bool, False),
                               since=Optional(Any(), "")):
        """Subscribe to hear about new Incidents, optionally catching up on
        old ones.
        Each new Incident will be reported by name+trigger to the observer by
        a new_incident() message. This message will be sent after the
        incident reporter has finished working (usually a few seconds after
        the triggering event).
        If catch_up=True, then old Incidents will be sent to the observer
        before any new ones are reported. When the publisher has finished
        sending the names of all old events, it will send a
        done_with_incident_catchup() message to the observer. Only old
        Incidents with a name that is alphabetically greater (and thus later)
        than the since= argument will be sent. Use since='' to catch up on
        all old Incidents.
        Call unsubscribe() on the returned RISubscription object to stop
        receiving messages.
        """
        return RISubscription
    def get_incident(incident_name=Any()):
        """Given an incident name, return the header dict and list of event
        dicts for that incident."""
        # note that this puts all the events in memory at the same time, but
        # we expect the logfiles to be of a reasonable size: not much larger
        # than the circular buffers that we keep around anyways.
        return (Header, ListOf(Event))
class RILogGatherer(RemoteInterface):
    """Remote interface offered by a gatherer: connecting nodes announce
    their logport via logport()."""
    __remote_name__ = "RILogGatherer.foolscap.lothar.com"
    def logport(nodeid=TubID, logport=RILogPublisher):
        return None
class IIncidentReporter(Interface):
    """Locally-implemented object that records an Incident (a triggering
    event plus surrounding context) once one has been declared."""
    def incident_declared(triggering_event):
        """This is called when an Incident needs to be recorded."""
    def new_trigger(triggering_event):
        """This is called when a triggering event occurs while an incident is
        already being reported. If the event happened later, it would trigger
        a new incident. Since it overlapped with the existing incident, it
        will just be added to that incident.
        The triggering event will also be reported through the usual
        event-publish-subscribe mechanism. This method is provided to give
        the reporter the opportunity to mark the event somehow, for the
        benefit of incident-file analysis tools.
        """
    def is_active():
        """Returns True if the reporter is still running. While in this
        state, new Incident triggers will be passed to the existing reporter
        instead of causing a new Incident to be declared. This will tend to
        coalesce back-to-back problems into a single Incident."""
| mit | 0537248ba20b9d655bfe7798fe9e1b23 | 42.136054 | 77 | 0.667087 | 4.224517 | false | false | false | false |
warner/foolscap | src/foolscap/logging/gatherer.py | 1 | 22586 | from __future__ import print_function, unicode_literals
import six, os, sys, time, bz2
signal = None
try:
import signal
except ImportError:
pass
from zope.interface import implementer
from twisted.internet import reactor, utils, defer
from twisted.python import usage, procutils, filepath, log as tw_log
from twisted.application import service, internet
from foolscap.api import Tub, Referenceable
from foolscap.logging.interfaces import RILogGatherer, RILogObserver
from foolscap.logging.incident import IncidentClassifierBase, TIME_FORMAT
from foolscap.logging import flogfile
from foolscap.util import move_into_place
class BadTubID(Exception):
    # NOTE(review): not raised anywhere in this module; presumably guards
    # tub-ID validation elsewhere -- confirm before removing.
    pass
class ObsoleteGatherer(Exception):
    """Raised when a gatherer basedir lacks the explicit 'port'/'location'
    files (pre-0.12 layout) and must be recreated."""
    pass
class GatheringBase(service.MultiService, Referenceable):
    """Shared startup logic for the two gatherer services: validates the
    basedir, reads 'port'/'location', starts a Tub, and publishes this
    object's FURL to self.furlFile."""
    # requires self.furlFile and self.tacFile to be set on the class, both of
    # which should be relative to the basedir.
    use_local_addresses = True
    def __init__(self, basedir):
        service.MultiService.__init__(self)
        if basedir is None:
            # This instance was created by a gatherer.tac file. Confirm that
            # we're running from the right directory (the one with the .tac
            # file), otherwise we'll put the logfiles in the wrong place.
            basedir = os.getcwd()
            tac = os.path.join(basedir, self.tacFile)
            if not os.path.exists(tac):
                raise RuntimeError("running in the wrong directory")
        self.basedir = basedir
        certFile = os.path.join(self.basedir, "gatherer.pem")
        portfile = os.path.join(self.basedir, "port")
        locationfile = os.path.join(self.basedir, "location")
        furlFile = os.path.join(self.basedir, self.furlFile)
        # Foolscap-0.11.0 was the last release that used
        # automatically-determined listening addresses and ports. New ones
        # (created with "flogtool create-gatherer" or
        # "create-incident-gathererer" now require --location and --port
        # arguments to provide these values. If you really don't want to
        # create a new one, you can write "tcp:3117" (or some other port
        # number of your choosing) to BASEDIR/port, and "tcp:$HOSTNAME:3117"
        # (with your hostname or IP address) to BASEDIR/location
        if (not os.path.exists(portfile) or
            not os.path.exists(locationfile)):
            raise ObsoleteGatherer("Please create a new gatherer, with both "
                                   "--port and --location")
        try:
            with open(portfile, "r") as f:
                port = f.read().strip()
        except EnvironmentError:
            raise ObsoleteGatherer("Please create a new gatherer, with both "
                                   "--port and --location")
        try:
            with open(locationfile, "r") as f:
                location = f.read().strip()
        except EnvironmentError:
            raise ObsoleteGatherer("Please create a new gatherer, with both "
                                   "--port and --location")
        self._tub = Tub(certFile=certFile)
        self._tub.setServiceParent(self)
        self._tub.listenOn(port)
        self._tub.setLocation(location)
        self.my_furl = self._tub.registerReference(self, furlFile=furlFile)
        if self.verbose:
            print("Gatherer waiting at:", self.my_furl)
class CreateGatherOptions(usage.Options):
    """flogtool create-gatherer GATHERER_DIRECTORY"""
    stdout = sys.stdout
    stderr = sys.stderr
    optFlags = [
        ("bzip", "b", "Compress each output file with bzip2"),
        ("quiet", "q", "Don't print instructions to stdout"),
        ]
    optParameters = [
        ("port", "p", "tcp:3117", "TCP port to listen on (strports string)"),
        ("location", "l", None, "(required) Tub location hints to use in generated FURLs. e.g. 'tcp:example.org:3117'"),
        ("rotate", "r", None,
         "Rotate the output file every N seconds."),
        ]

    def opt_port(self, port):
        # validate user input with UsageError, not assert (asserts vanish
        # under 'python -O' and produce an unfriendly traceback)
        if port.startswith("ssl:"):
            raise usage.UsageError("--port must not be an ssl: endpoint")
        if port == "tcp:0":
            raise usage.UsageError("--port must be a specific port, not tcp:0")
        self["port"] = port

    def parseArgs(self, gatherer_dir):
        self["basedir"] = gatherer_dir

    def postOptions(self):
        if not self["location"]:
            raise usage.UsageError("--location= is mandatory")
@implementer(RILogObserver)
class Observer(Referenceable):
    """Receives log events from one remote publisher and forwards them to
    the gatherer, tagged with the sender's node id."""
    def __init__(self, nodeid_s, gatherer):
        self.nodeid_s = nodeid_s # printable string
        self.gatherer = gatherer
    def remote_msg(self, d):
        # d is one log-event dictionary from the remote publisher
        self.gatherer.msg(self.nodeid_s, d)
@implementer(RILogGatherer)
class GathererService(GatheringBase):
    # create this with 'flogtool create-gatherer BASEDIR'
    # run this as 'cd BASEDIR && twistd -y gatherer.tac'
    """Run a service that gathers logs from multiple applications.
    The LogGatherer sits in a corner and receives log events from many
    applications at once. At startup, it runs a Tub and emits the gatherer's
    long-term FURL. You can then configure your applications to connect to
    this FURL when they start and pass it a reference to their LogPublisher.
    The gatherer will subscribe to the publisher and save all the resulting
    messages in a serialized flogfile.
    Applications can use code like the following to create a LogPublisher and
    pass it to the gatherer::
     def tub_ready(self):
         # called when the Tub is available for registerReference
         lp = LogPublisher('logport.furl')
         lp.setServiceParent(self.tub)
         log_gatherer_furl = self.get_config('log_gatherer.furl')
         if log_gatherer_furl:
             self.tub.connectTo(log_gatherer_furl,
                                self._log_gatherer_connected, lp)
     def _log_gatherer_connected(self, rref, lp):
         rref.callRemote('logport', self.nodeid, lp)
    This LogGatherer class is meant to be run by twistd from a .tac file, but
    applications that want to provide the same functionality can just
    instantiate it with a distinct basedir= and call startService.
    """
    verbose = True
    furlFile = "log_gatherer.furl"
    tacFile = "gatherer.tac"
    def __init__(self, rotate, use_bzip, basedir=None):
        GatheringBase.__init__(self, basedir)
        if rotate: # int or None
            rotator = internet.TimerService(rotate, self.do_rotate)
            rotator.setServiceParent(self)
        bzip = None
        if use_bzip:
            # locate an external bzip2 executable; None if not installed
            bzips = procutils.which("bzip2")
            if bzips:
                bzip = bzips[0]
        self.bzip = bzip
        # SIGHUP triggers a logfile rotation (not available on e.g. Windows)
        if signal and hasattr(signal, "SIGHUP"):
            signal.signal(signal.SIGHUP, self._handle_SIGHUP)
        self._savefile = None
    def _handle_SIGHUP(self, *args):
        # signal handlers run outside the reactor; hop back onto its thread
        reactor.callFromThread(self.do_rotate)
    def startService(self):
        # note: the rotator (if any) will fire as soon as startService is
        # called, since TimerService uses now=True. To deal with this,
        # do_rotate() tests self._savefile before doing anything else, and
        # we're careful to upcall to startService before we do the first
        # call to _open_savefile().
        GatheringBase.startService(self)
        now = time.time()
        self._open_savefile(now)
    def format_time(self, when):
        return time.strftime(TIME_FORMAT, time.gmtime(when)) + "Z"
    def _open_savefile(self, now):
        new_filename = "from-%s---to-present.flog" % self.format_time(now)
        self._savefile_name = os.path.join(self.basedir, new_filename)
        # unbuffered append, so events hit disk even if we're killed
        self._savefile = open(self._savefile_name, "ab", 0)
        self._savefile.write(flogfile.MAGIC)
        self._starting_timestamp = now
        flogfile.serialize_header(self._savefile, "gatherer",
                                  start=self._starting_timestamp)
    def do_rotate(self):
        if not self._savefile:
            return
        self._savefile.close()
        now = time.time()
        from_time = self.format_time(self._starting_timestamp)
        to_time = self.format_time(now)
        new_name = "from-%s---to-%s.flog" % (from_time, to_time)
        new_name = os.path.join(self.basedir, new_name)
        move_into_place(self._savefile_name, new_name)
        self._open_savefile(now)
        if self.bzip:
            # we spawn an external bzip process because it's easier than
            # using the stdlib bz2 module and spreading the work out over
            # several ticks. We're trying to resume accepting log events
            # quickly here. We don't save the events using BZ2File because
            # the gatherer might be killed at any moment, and BZ2File doesn't
            # flush its output until the file is closed.
            d = utils.getProcessOutput(self.bzip, [new_name], env=os.environ)
            new_name = new_name + ".bz2"
            def _compression_error(f):
                print(f)
            d.addErrback(_compression_error)
            # note that by returning this Deferred, the rotation timer won't
            # start again until the bzip process finishes
        else:
            d = defer.succeed(None)
        d.addCallback(lambda res: new_name)
        return d # for tests
    def remote_logport(self, nodeid, publisher):
        # nodeid is actually a printable string
        nodeid_s = six.ensure_text(nodeid)
        o = Observer(nodeid_s, self)
        d = publisher.callRemote("subscribe_to_all", o)
        d.addCallback(lambda res: None)
        return d # mostly for testing
    def msg(self, nodeid_s, d):
        # best-effort: a single unserializable event must not kill the
        # gatherer, so failures are reported and swallowed
        try:
            flogfile.serialize_wrapper(self._savefile, d,
                                       from_=nodeid_s,
                                       rx_time=time.time())
        except Exception as ex:
            print("GATHERER: unable to serialize %s: %s" % (d, ex))
# Template for the twistd .tac file written by create_log_gatherer();
# %(path)s, %(rotate)s and %(use_bzip)s are substituted at creation time.
LOG_GATHERER_TACFILE = """\
# -*- python -*-
# we record the path when 'flogtool create-gatherer' is run, in case flogtool
# was run out of a source tree. This is somewhat fragile, of course.
stashed_path = [
%(path)s]
import sys
needed = [p for p in stashed_path if p not in sys.path]
sys.path = needed + sys.path
from foolscap.logging import gatherer
from twisted.application import service
rotate = %(rotate)s
use_bzip = %(use_bzip)s
gs = gatherer.GathererService(rotate, use_bzip)
application = service.Application('log_gatherer')
gs.setServiceParent(application)
"""
def create_log_gatherer(config):
    """Create a log-gatherer basedir: write the 'port', 'location', and
    'gatherer.tac' files described by the 'flogtool create-gatherer'
    options in config, creating the directory if necessary."""
    basedir = config["basedir"]
    stdout = config.stdout
    assert config["port"]
    assert config["location"]
    if not os.path.exists(basedir):
        os.makedirs(basedir)
    # context managers guarantee the files are flushed and closed even if a
    # later step fails
    with open(os.path.join(basedir, "port"), "w") as f:
        f.write("%s\n" % config["port"])
    with open(os.path.join(basedir, "location"), "w") as f:
        f.write("%s\n" % config["location"])
    # stash sys.path so the daemon can import foolscap even when flogtool
    # was run from a source tree
    stashed_path = "".join([" %r,\n" % p for p in sys.path])
    if config["rotate"]:
        rotate = config["rotate"]
    else:
        rotate = "None"
    with open(os.path.join(basedir, "gatherer.tac"), "w") as f:
        f.write(LOG_GATHERER_TACFILE % { 'path': stashed_path,
                                         'rotate': rotate,
                                         'use_bzip': bool(config["bzip"]),
                                         })
    if not config["quiet"]:
        print("Gatherer created in directory %s" % basedir, file=stdout)
        print("Now run '(cd %s && twistd -y gatherer.tac)' to launch the daemon" % basedir, file=stdout)
###################
# Incident Gatherer
class CreateIncidentGatherOptions(usage.Options):
    """flogtool create-incident-gatherer BASEDIR"""
    stdout = sys.stdout
    stderr = sys.stderr
    optFlags = [
        ("quiet", "q", "Don't print instructions to stdout"),
        ]
    optParameters = [
        ("port", "p", "tcp:3118", "TCP port to listen on (strports string)"),
        ("location", "l", None, "(required) Tub location hints to use in generated FURLs. e.g. 'tcp:example.org:3118'"),
        ]

    def opt_port(self, port):
        # validate user input with UsageError, not assert (asserts vanish
        # under 'python -O' and produce an unfriendly traceback)
        if port.startswith("ssl:"):
            raise usage.UsageError("--port must not be an ssl: endpoint")
        if port == "tcp:0":
            raise usage.UsageError("--port must be a specific port, not tcp:0")
        self["port"] = port

    def parseArgs(self, basedir):
        self["basedir"] = basedir

    def postOptions(self):
        if not self["location"]:
            raise usage.UsageError("--location= is mandatory")
@implementer(RILogObserver)
class IncidentObserver(Referenceable):
    """Subscribes to one node's incident stream and mirrors each incident
    into basedir as a .flog.bz2 file, fetching at most one at a time and
    remembering the newest incident name in BASEDIR/latest."""
    def __init__(self, basedir, tubid_s, gatherer, publisher, stdout):
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        self.basedir = filepath.FilePath(basedir)
        self.tubid_s = tubid_s # printable string
        self.gatherer = gatherer
        self.publisher = publisher
        self.stdout = stdout
        self.caught_up_d = defer.Deferred()
        self.incidents_wanted = []
        self.incident_fetch_outstanding = False
    def connect(self):
        # look for a local state file, to see what incidents we've already
        # got
        statefile = self.basedir.child("latest").path
        latest = ""
        try:
            with open(statefile, "r") as f:
                latest = f.read().strip()
        except EnvironmentError:
            pass  # no state yet: catch up on everything
        print("connected to %s, last known incident is %s"
              % (self.tubid_s, latest), file=self.stdout)
        # now subscribe to everything since then
        d = self.publisher.callRemote("subscribe_to_incidents", self,
                                      catch_up=True,
                                      since=six.ensure_binary(latest))
        # for testing, we arrange for this Deferred (which governs the return
        # from remote_logport) to not fire until we've finished catching up
        # on all incidents.
        d.addCallback(lambda res: self.caught_up_d)
        return d
    def remote_new_incident(self, name, trigger):
        # name= should look like "incident-2008-07-29-204211-aspkxoi". We
        # prevent name= from containing path metacharacters like / or : by
        # using FilePath later on.
        name = six.ensure_str(name)
        self.incidents_wanted.append( (name, trigger) )
        self.maybe_fetch_incident()
    def maybe_fetch_incident(self):
        # only fetch one incident at a time, to keep the sender's outbound
        # memory usage to a reasonable level
        if self.incident_fetch_outstanding:
            return
        if not self.incidents_wanted:
            return
        self.incident_fetch_outstanding = True
        (name, trigger) = self.incidents_wanted.pop(0)
        print("fetching incident", six.text_type(name), file=self.stdout)
        d = self.publisher.callRemote("get_incident", six.ensure_binary(name))
        def _clear_outstanding(res):
            self.incident_fetch_outstanding = False
            return res
        d.addBoth(_clear_outstanding)
        d.addCallback(self._got_incident, name, trigger)
        d.addErrback(tw_log.err,
                     "IncidentObserver.get_incident or _got_incident")
        d.addBoth(lambda ign: self.maybe_fetch_incident())
    def _got_incident(self, incident, name, trigger):
        # We always save the incident to a .bz2 file.
        abs_fn = self.basedir.child(name).path # this prevents evil
        abs_fn += ".flog.bz2"
        # we need to record the relative pathname of the savefile, for use by
        # the classifiers (they write it into their output files)
        rel_fn = os.path.join("incidents", self.tubid_s, name) + ".flog.bz2"
        self.save_incident(abs_fn, incident)
        self.update_latest(name)
        self.gatherer.new_incident(abs_fn, rel_fn, self.tubid_s, incident)
    def save_incident(self, filename, incident):
        """Write one (header, events) incident to a .flog.bz2 file."""
        now = time.time()
        (header, events) = incident
        # the context manager guarantees the BZ2File is closed (and its
        # output flushed) even if serialization raises
        with bz2.BZ2File(filename, "w") as f:
            f.write(flogfile.MAGIC)
            flogfile.serialize_raw_header(f, header)
            for e in events:
                flogfile.serialize_wrapper(f, e, from_=self.tubid_s,
                                           rx_time=now)
    def update_latest(self, name):
        # remember the newest incident name so reconnects can resume there
        with open(self.basedir.child("latest").path, "w") as f:
            f.write(name + "\n")
    def remote_done_with_incident_catchup(self):
        self.caught_up_d.callback(None)
        return None
@implementer(RILogGatherer)
class IncidentGathererService(GatheringBase, IncidentClassifierBase):
    # create this with 'flogtool create-incident-gatherer BASEDIR'
    # run this as 'cd BASEDIR && twistd -y gatherer.tac'
    """Run a service that gathers Incidents from multiple applications.
    The IncidentGatherer sits in a corner and receives incidents from many
    applications at once. At startup, it runs a Tub and emits the gatherer's
    long-term FURL. You can then configure your applications to connect to
    this FURL when they start and pass it a reference to their LogPublisher.
    The gatherer will subscribe to the publisher and save all the resulting
    incidents in the incidents/ directory, organized by the publisher's
    tubid. The gatherer will also run a set of user-supplied classifier
    functions on the incidents and put the filenames (one line per incident)
    into files in the categories/ directory.
    This IncidentGatherer class is meant to be run as a standalone service
    from bin/flogtool, but by careful subclassing and setup it could be run
    as part of some other application.
    """
    verbose = True
    furlFile = "log_gatherer.furl"
    tacFile = "gatherer.tac"
    def __init__(self, classifiers=(), basedir=None, stdout=None):
        # classifiers defaults to an immutable tuple rather than a shared
        # mutable [] (classic default-argument pitfall); any iterable of
        # classifier functions is accepted, as before
        GatheringBase.__init__(self, basedir)
        IncidentClassifierBase.__init__(self)
        self.classifiers.extend(classifiers)
        self.stdout = stdout
        self.incidents_received = 0 # for tests
    def startService(self):
        # make sure both working directories exist before the Tub starts
        indir = os.path.join(self.basedir, "incidents")
        if not os.path.isdir(indir):
            os.makedirs(indir)
        outputdir = os.path.join(self.basedir, "classified")
        if not os.path.isdir(outputdir):
            os.makedirs(outputdir)
        self.add_classify_files(self.basedir)
        self.classify_stored_incidents(indir)
        GatheringBase.startService(self)
    def classify_stored_incidents(self, indir):
        """Classify every stored incident that is not already listed in one
        of the classified/ category files."""
        stdout = self.stdout or sys.stdout
        print("classifying stored incidents", file=stdout)
        # now classify all stored incidents that aren't already classified
        already = set()
        outputdir = os.path.join(self.basedir, "classified")
        for category in os.listdir(outputdir):
            with open(os.path.join(outputdir, category), "r") as f:
                for line in f:
                    fn = line.strip()
                    abs_fn = os.path.join(self.basedir, fn)
                    already.add(abs_fn)
        print("%d incidents already classified" % len(already), file=stdout)
        count = 0
        for tubid_s in os.listdir(indir):
            nodedir = os.path.join(indir, tubid_s)
            for fn in os.listdir(nodedir):
                if fn.startswith("incident-"):
                    abs_fn = os.path.join(nodedir, fn)
                    if abs_fn in already:
                        continue
                    incident = self.load_incident(abs_fn)
                    rel_fn = os.path.join("incidents", tubid_s, fn)
                    self.move_incident(rel_fn, tubid_s, incident)
                    count += 1
        print("done classifying %d stored incidents" % count, file=stdout)
    def remote_logport(self, nodeid, publisher):
        # we ignore nodeid (which is a printable string), and get the tubid
        # from the publisher remoteReference. getRemoteTubID() protects us
        # from .. and / and other nasties.
        tubid_s = publisher.getRemoteTubID()
        basedir = os.path.join(self.basedir, "incidents", tubid_s)
        stdout = self.stdout or sys.stdout
        o = IncidentObserver(basedir, tubid_s, self, publisher, stdout)
        d = o.connect()
        d.addCallback(lambda res: None)
        return d # mostly for testing
    def new_incident(self, abs_fn, rel_fn, tubid_s, incident):
        self.move_incident(rel_fn, tubid_s, incident)
        self.incidents_received += 1
    def move_incident(self, rel_fn, tubid_s, incident):
        """Classify one incident and append its filename to each matching
        category file; returns the list of category names."""
        stdout = self.stdout or sys.stdout
        categories = self.classify_incident(incident)
        for c in categories:
            fn = os.path.join(self.basedir, "classified", c)
            with open(fn, "a") as f:
                f.write(rel_fn + "\n")
        print("classified %s as [%s]" % (rel_fn, ",".join(categories)), file=stdout)
        return categories
INCIDENT_GATHERER_TACFILE = """\
# -*- python -*-
# we record the path when 'flogtool create-incident-gatherer' is run, in case
# flogtool was run out of a source tree. This is somewhat fragile, of course.
stashed_path = [
%(path)s]
import sys
needed = [p for p in stashed_path if p not in sys.path]
sys.path = needed + sys.path
from foolscap.logging import gatherer
from twisted.application import service
gs = gatherer.IncidentGathererService()
# To add a classifier function, store it in a neighboring file named
# classify_*.py, in a function named classify_incident(). All such files will
# be loaded at startup:
#
# %% cat classify_foolscap.py
# import re
# TUBCON_RE = re.compile(r'^Tub.connectorFinished: WEIRD, <foolscap.connection.TubConnector instance at \w+> is not in \[')
# def classify_incident(trigger):
# # match some foolscap messages
# m = trigger.get('message', '')
# if TUBCON_RE.search(m):
# return 'foolscap-tubconnector'
# %%
application = service.Application('incident_gatherer')
gs.setServiceParent(application)
"""
def create_incident_gatherer(config):
    """Create an incident-gatherer basedir: write the 'port', 'location',
    and 'gatherer.tac' files described by the
    'flogtool create-incident-gatherer' options in config."""
    basedir = config["basedir"]
    stdout = config.stdout
    assert config["port"]
    assert config["location"]
    if not os.path.exists(basedir):
        os.makedirs(basedir)
    # context managers guarantee the files are flushed and closed even if a
    # later step fails
    with open(os.path.join(basedir, "port"), "w") as f:
        f.write("%s\n" % config["port"])
    with open(os.path.join(basedir, "location"), "w") as f:
        f.write("%s\n" % config["location"])
    # stash sys.path so the daemon can import foolscap even when flogtool
    # was run from a source tree
    stashed_path = "".join([" %r,\n" % p for p in sys.path])
    with open(os.path.join(basedir, "gatherer.tac"), "w") as f:
        f.write(INCIDENT_GATHERER_TACFILE % { 'path': stashed_path,
                                              })
    if not config["quiet"]:
        print("Incident Gatherer created in directory %s" % basedir, file=stdout)
        print("Now run '(cd %s && twistd -y gatherer.tac)' to launch the daemon" % basedir, file=stdout)
| mit | b271acaab057ad63fa1fa204d1e4e358 | 37.608547 | 123 | 0.622554 | 3.68992 | false | false | false | false |
lipis/electron-crash-reporter | main/model/base.py | 3 | 1201 | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from marshmallow import validate
from webargs.flaskparser import parser
from webargs import fields as wf
from api import fields
import config
import util
class Base(ndb.Model):
    """Common ancestor for all datastore models.

    Adds created/modified timestamps, a schema ``version`` stamp, and the
    query helpers shared by every model.
    """
    created = ndb.DateTimeProperty(auto_now_add=True)  # set once on insert
    modified = ndb.DateTimeProperty(auto_now=True)  # refreshed on every put()
    version = ndb.IntegerProperty(default=config.CURRENT_VERSION_TIMESTAMP)

    @classmethod
    def get_by(cls, name, value):
        """Return the first entity whose property ``name`` equals ``value``,
        or ``None`` when no entity matches."""
        return cls.query(getattr(cls, name) == value).get()

    @classmethod
    def get_dbs(cls, query=None, ancestor=None, order=None, limit=None, cursor=None, **kwargs):
        """Fetch entities with pagination.

        ``cursor``, ``limit`` and ``order`` are also read from the current
        request via webargs; explicit arguments win over request parameters
        (note the ``x or args[...]`` pattern below).
        """
        args = parser.parse({
            'cursor': wf.Str(missing=None),
            'limit': wf.Int(missing=None, validate=validate.Range(min=-1)),
            'order': wf.Str(missing=None),
        })
        return util.get_dbs(
            query or cls.query(ancestor=ancestor),
            limit=limit or args['limit'],
            cursor=cursor or args['cursor'],
            order=order or args['order'],
            **kwargs
        )

    # Marshalling table used by the REST layer to serialize entities.
    FIELDS = {
        'key': fields.Key,
        'id': fields.Id,
        'version': fields.Integer,
        'created': fields.DateTime,
        'modified': fields.DateTime,
    }
| mit | db3373b37c396c0ae384350de7ccc429 | 25.688889 | 93 | 0.680266 | 3.511696 | false | false | false | false |
lipis/electron-crash-reporter | main/api/v1/project.py | 1 | 2106 | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from flask.ext import restful
import flask
from api import helpers
import auth
import model
import util
from main import api_v1
@api_v1.resource('/project/', endpoint='api.project.list')
class ProjectListAPI(restful.Resource):
    """Paginated list of the signed-in user's projects."""

    @auth.login_required
    def get(self):
        # Only the current user's projects are visible through this endpoint.
        project_dbs, project_cursor = model.Project.get_dbs(user_key=auth.current_user_key())
        return helpers.make_response(project_dbs, model.Project.FIELDS, project_cursor)
@api_v1.resource('/project/<string:project_key>/', endpoint='api.project')
class ProjectAPI(restful.Resource):
    """Single project resource, addressed by its urlsafe ndb key."""

    @auth.login_required
    def get(self, project_key):
        project_db = ndb.Key(urlsafe=project_key).get()
        # 404 both for a missing project and for one owned by another user,
        # so ownership is not leaked through differing error codes.
        if not project_db or project_db.user_key != auth.current_user_key():
            helpers.make_not_found_exception('Project %s not found' % project_key)
        return helpers.make_response(project_db, model.Project.FIELDS)
###############################################################################
# Admin
###############################################################################
@api_v1.resource('/admin/project/', endpoint='api.admin.project.list')
class AdminProjectListAPI(restful.Resource):
    """Admin-only project listing, optionally filtered by explicit keys."""

    @auth.admin_required
    def get(self):
        """Return the projects named by the ``project_keys`` request param,
        or a paginated listing of all projects when the param is absent."""
        project_keys = util.param('project_keys', list)
        if project_keys:
            project_db_keys = [ndb.Key(urlsafe=k) for k in project_keys]
            project_dbs = ndb.get_multi(project_db_keys)
            # BUG FIX: was ``model.project.FIELDS`` (lowercase ``project``),
            # which raises AttributeError -- the model class is
            # ``model.Project``, as used everywhere else in this module.
            return helpers.make_response(project_dbs, model.Project.FIELDS)
        project_dbs, project_cursor = model.Project.get_dbs()
        return helpers.make_response(project_dbs, model.Project.FIELDS, project_cursor)
@api_v1.resource('/admin/project/<string:project_key>/', endpoint='api.admin.project')
class AdminProjectAPI(restful.Resource):
    """Admin-only single-project resource (no ownership check)."""

    @auth.admin_required
    def get(self, project_key):
        project_db = ndb.Key(urlsafe=project_key).get()
        if not project_db:
            helpers.make_not_found_exception('Project %s not found' % project_key)
        return helpers.make_response(project_db, model.Project.FIELDS)
| mit | 3305cfb7af7a3b07e331c7939538e529 | 34.694915 | 89 | 0.676163 | 3.6 | false | false | false | false |
pytest-dev/pluggy | scripts/release.py | 1 | 1987 | """
Release script.
"""
import argparse
import sys
from subprocess import check_call
from colorama import Fore
from colorama import init
from git import Remote
from git import Repo
def create_branch(version):
    """Create a fresh ``release-{version}`` branch from upstream/main.

    Refuses to run on a dirty working tree. Returns the ``Repo`` with the
    new branch checked out.
    """
    repo = Repo.init(".")
    if repo.is_dirty(untracked_files=True):
        raise RuntimeError("Repository is dirty, please commit/stash your changes.")
    branch_name = f"release-{version}"
    print(f"{Fore.CYAN}Create {branch_name} branch from upstream main")
    upstream = get_upstream(repo)
    upstream.fetch()
    # force=True recreates the branch if a previous release attempt left one.
    release_branch = repo.create_head(branch_name, upstream.refs.main, force=True)
    release_branch.checkout()
    return repo
def get_upstream(repo: "Repo") -> "Remote":
    """Locate the pytest-dev/pluggy remote among the repository's remotes."""
    upstream_suffixes = ("pytest-dev/pluggy.git", "pytest-dev/pluggy")
    match = next(
        (
            candidate
            for candidate in repo.remotes
            if any(url.endswith(upstream_suffixes) for url in candidate.urls)
        ),
        None,
    )
    if match is None:
        raise RuntimeError("could not find pytest-dev/pluggy remote")
    return match
def pre_release(version):
    """Create the release branch, write out the changelog, and commit.

    (Despite the original wording, no tag is created here -- the commit is
    meant to be pushed and turned into a PR.)
    """
    create_branch(version)
    changelog(version, write_out=True)
    check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"])
    print()
    print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.")
def changelog(version, write_out=False):
    """Run towncrier for `version`; draft-only unless `write_out` is true."""
    extra_args = [] if write_out else ["--draft"]
    print(f"{Fore.CYAN}Generating CHANGELOG")
    check_call(["towncrier", "--yes", "--version", version] + extra_args)
def main():
    """CLI entry point: parse the version argument and run pre_release."""
    init(autoreset=True)
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("version", help="Release version")
    cli_options = arg_parser.parse_args()
    try:
        pre_release(cli_options.version)
    except RuntimeError as exc:
        print(f"{Fore.RED}ERROR: {exc}")
        return 1


if __name__ == "__main__":
    sys.exit(main())
| mit | a69f80f1c73680db45aeb5a9be3fd1da | 26.985915 | 84 | 0.658279 | 3.770398 | false | false | false | false |
lipis/electron-crash-reporter | main/auth/yahoo.py | 8 | 2108 | # coding: utf-8
import flask
import auth
import model
import util
from main import app
# OAuth 1.0a client configuration for "Sign in with Yahoo".  The consumer
# key/secret are read from the datastore-backed master config, so they can
# be rotated without a redeploy.
yahoo_config = dict(
    access_token_url='https://api.login.yahoo.com/oauth/v2/get_token',
    authorize_url='https://api.login.yahoo.com/oauth/v2/request_auth',
    base_url='https://query.yahooapis.com/',
    consumer_key=model.Config.get_master_db().yahoo_consumer_key,
    consumer_secret=model.Config.get_master_db().yahoo_consumer_secret,
    request_token_url='https://api.login.yahoo.com/oauth/v2/get_request_token',
)

yahoo = auth.create_oauth_app(yahoo_config, 'yahoo')
@app.route('/api/auth/callback/yahoo/')
def yahoo_authorized():
    """OAuth callback: exchange the request token, fetch the Yahoo profile
    via a YQL query, and sign the corresponding local user in."""
    response = yahoo.authorized_response()
    if response is None:
        flask.flash('You denied the request to sign in.')
        return flask.redirect(util.get_next_url())
    # Persist the token pair so subsequent API calls (via the tokengetter)
    # can authenticate.
    flask.session['oauth_token'] = (
        response['oauth_token'],
        response['oauth_token_secret'],
    )
    fields = 'guid, emails, familyName, givenName, nickname'
    me = yahoo.get(
        '/v1/yql',
        data={
            'format': 'json',
            'q': 'select %s from social.profile where guid = me;' % fields,
            'realm': 'yahooapis.com',
        },
    )
    user_db = retrieve_user_from_yahoo(me.data['query']['results']['profile'])
    return auth.signin_user_db(user_db)
@yahoo.tokengetter
def get_yahoo_oauth_token():
    """Return the (token, secret) pair stored in the session, or None when
    the user has not authorized yet."""
    return flask.session.get('oauth_token')
@app.route('/signin/yahoo/')
def signin_yahoo():
    """Start the Yahoo OAuth sign-in flow."""
    return auth.signin_oauth(yahoo)
def retrieve_user_from_yahoo(response):
    """Return the local user for a Yahoo profile, creating one if needed.

    ``response`` is the ``profile`` dict from the YQL social.profile query;
    ``emails`` may be a single dict or a list of dicts with ``handle`` and
    an optional ``primary`` flag.
    """
    auth_id = 'yahoo_%s' % response['guid']
    user_db = model.User.get_by('auth_ids', auth_id)
    if user_db:
        return user_db
    names = [response.get('givenName', ''), response.get('familyName', '')]
    emails = response.get('emails', {})
    if not isinstance(emails, list):
        emails = [emails]
    emails = [e for e in emails if 'handle' in e]
    # BUG FIX: sort descending on the 'primary' flag so the address Yahoo
    # marks as primary lands at index 0.  The previous ascending sort put
    # primary entries LAST and then took emails[0], picking a non-primary
    # address whenever both kinds were present.
    emails.sort(key=lambda e: e.get('primary', False), reverse=True)
    email = emails[0]['handle'] if emails else ''
    return auth.create_user_db(
        auth_id=auth_id,
        name=' '.join(names).strip() or response['nickname'],
        username=response['nickname'],
        email=email,
        verified=bool(email),
    )
| mit | ae3ade71d6880e8bdf33a5bf168f8ce7 | 26.376623 | 77 | 0.669829 | 3.155689 | false | true | false | false |
lipis/electron-crash-reporter | main/auth/linkedin.py | 8 | 1892 | # coding: utf-8
import flask
import auth
import config
import model
import util
from main import app
# OAuth 2.0 client configuration for "Sign in with LinkedIn".  A random
# ``state`` value is included to protect the callback against CSRF.
linkedin_config = dict(
    access_token_method='POST',
    access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
    authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
    base_url='https://api.linkedin.com/v1/',
    consumer_key=config.CONFIG_DB.linkedin_api_key,
    consumer_secret=config.CONFIG_DB.linkedin_secret_key,
    request_token_params={
        'scope': 'r_basicprofile r_emailaddress',
        'state': util.uuid(),
    },
)

linkedin = auth.create_oauth_app(linkedin_config, 'linkedin')
def change_linkedin_query(uri, headers, body):
    """Pre-request hook: ask LinkedIn for JSON responses via the
    ``x-li-format`` header; uri and body pass through untouched."""
    headers.update({'x-li-format': 'json'})
    return uri, headers, body
linkedin.pre_request = change_linkedin_query
@app.route('/api/auth/callback/linkedin/')
def linkedin_authorized():
    """OAuth callback: store the access token, fetch the LinkedIn profile,
    and sign the corresponding local user in."""
    response = linkedin.authorized_response()
    if response is None:
        flask.flash('You denied the request to sign in.')
        return flask.redirect(util.get_next_url())
    # Stored as a (token, secret) pair for the tokengetter; OAuth 2 has no
    # secret, hence the empty string.
    flask.session['access_token'] = (response['access_token'], '')
    me = linkedin.get('people/~:(id,first-name,last-name,email-address)')
    user_db = retrieve_user_from_linkedin(me.data)
    return auth.signin_user_db(user_db)
@linkedin.tokengetter
def get_linkedin_oauth_token():
    """Return the stored (token, '') pair, or None when not authorized."""
    return flask.session.get('access_token')
@app.route('/signin/linkedin/')
def signin_linkedin():
    """Start the LinkedIn OAuth sign-in flow."""
    return auth.signin_oauth(linkedin)
def retrieve_user_from_linkedin(response):
    """Return the local user for a LinkedIn profile, creating one if needed."""
    auth_id = 'linkedin_%s' % response['id']
    existing = model.User.get_by('auth_ids', auth_id)
    if existing:
        return existing
    full_name = ' '.join(
        [response.get('firstName', ''), response.get('lastName', '')]
    ).strip()
    email = response.get('emailAddress', '')
    # Fall back to the display name as username when no email was granted.
    return auth.create_user_db(
        auth_id=auth_id,
        name=full_name,
        username=email or full_name,
        email=email,
        verified=bool(email),
    )
| mit | 18fe19d9a3b50f4d7a675d7bcdb10477 | 24.567568 | 71 | 0.69926 | 3.101639 | false | true | false | false |
lipis/electron-crash-reporter | main/api/helpers.py | 2 | 1762 | # coding: utf-8
from datetime import datetime
import logging
from flask.ext import restful
from werkzeug import exceptions
import flask
import util
class Api(restful.Api):
    """Flask-RESTful Api subclass that funnels every error through the
    module-level handle_error() so all failures share one JSON envelope."""

    def unauthorized(self, response):
        # Abort with a plain 401 instead of flask-restful's default
        # WWW-Authenticate challenge response.
        flask.abort(401)

    def handle_error(self, e):
        return handle_error(e)
def handle_error(e):
    """Serialize any exception into the API's JSON error envelope.

    Returns a ``(body, status_code)`` tuple. Exceptions without an HTTP
    ``code`` attribute (i.e. non-werkzeug errors) are normalized to 500.
    """
    logging.exception(e)
    # EAFP: werkzeug HTTPExceptions already carry code/name/description;
    # anything else gets generic 500 attributes patched on.
    try:
        e.code
    except AttributeError:
        e.code = 500
        e.name = e.description = 'Internal Server Error'
    return util.jsonpify({
        'status': 'error',
        'error_code': e.code,
        'error_name': util.slugify(e.name),
        'error_message': e.name,
        'error_class': e.__class__.__name__,
        'description': e.description,
    }), e.code
def make_response(data, marshal_table, cursors=None):
    """Wrap `data` in the API's JSON success envelope.

    Iterable `data` is marshalled item by item and reported with a count;
    `cursors` may be a bare cursor string or a ``{'next': ..., 'prev': ...}``
    dict and is expanded into cursor/url pagination fields. A single object
    is marshalled directly.
    """
    if util.is_iterable(data):
        response = {
            'status': 'success',
            'count': len(data),
            'now': datetime.utcnow().isoformat(),
            # NOTE(review): py2-style map() -- under Python 3 this yields an
            # iterator that json cannot serialize; it would need list(...).
            'result': map(lambda l: restful.marshal(l, marshal_table), data),
        }
        if cursors:
            if isinstance(cursors, dict):
                if cursors.get('next'):
                    response['next_cursor'] = cursors['next']
                    response['next_url'] = util.generate_next_url(cursors['next'])
                if cursors.get('prev'):
                    response['prev_cursor'] = cursors['prev']
                    response['prev_url'] = util.generate_next_url(cursors['prev'])
            else:
                response['next_cursor'] = cursors
                response['next_url'] = util.generate_next_url(cursors)
        return util.jsonpify(response)
    return util.jsonpify({
        'status': 'success',
        'now': datetime.utcnow().isoformat(),
        'result': restful.marshal(data, marshal_table),
    })
def make_not_found_exception(description):
    """Raise a werkzeug NotFound (404) carrying the given description."""
    not_found = exceptions.NotFound()
    not_found.description = description
    raise not_found
| mit | 59491ce4ec2bd44e168305438c255689 | 24.911765 | 72 | 0.641884 | 3.545272 | false | false | false | false |
lipis/electron-crash-reporter | main/control/project.py | 1 | 4751 | # coding: utf-8
from flask.ext import wtf
from google.appengine.ext import ndb
import flask
import wtforms
import auth
import config
import model
import util
from main import app
###############################################################################
# Update
###############################################################################
class ProjectUpdateForm(wtf.Form):
    """User-facing form for creating or updating a project."""
    name = wtforms.StringField(
        model.Project.name._verbose_name,
        [wtforms.validators.required()],
        filters=[util.strip_filter],
    )
    token = wtforms.StringField(
        model.Project.token._verbose_name,
        [wtforms.validators.optional()],
        filters=[util.strip_filter],
    )
@app.route('/project/create/', methods=['GET', 'POST'])
@app.route('/project/<int:project_id>/update/', methods=['GET', 'POST'])
@auth.login_required
def project_update(project_id=0):
    """Create a new project (``project_id == 0``) or update an existing one.

    Aborts with 404 unless the project belongs to the signed-in user.
    """
    if project_id:
        project_db = model.Project.get_by_id(project_id)
    else:
        # A fresh, unsaved entity owned by the current user.
        project_db = model.Project(user_key=auth.current_user_key())
    if not project_db or project_db.user_key != auth.current_user_key():
        flask.abort(404)
    form = ProjectUpdateForm(obj=project_db)
    if form.validate_on_submit():
        form.populate_obj(project_db)
        project_db.put()
        return flask.redirect(flask.url_for('project_view', project_id=project_db.key.id()))
    return flask.render_template(
        'project/project_update.html',
        title=project_db.name if project_id else 'New Project',
        html_class='project-update',
        form=form,
        project_db=project_db,
    )
###############################################################################
# List
###############################################################################
@app.route('/project/')
@auth.login_required
def project_list():
    """Render the signed-in user's projects with cursor-based pagination."""
    project_dbs, project_cursor = model.Project.get_dbs(user_key=auth.current_user_key())
    return flask.render_template(
        'project/project_list.html',
        html_class='project-list',
        title='Projects',
        project_dbs=project_dbs,
        next_url=util.generate_next_url(project_cursor),
        api_url=flask.url_for('api.project.list'),
    )
###############################################################################
# View
###############################################################################
@app.route('/project/<int:project_id>/')
@auth.login_required
def project_view(project_id):
    """Render one project and its crashes (404 if not owned by the user)."""
    project_db = model.Project.get_by_id(project_id)
    if not project_db or project_db.user_key != auth.current_user_key():
        flask.abort(404)
    crash_dbs, crash_cursor = project_db.get_crash_dbs(
        order=util.param('order') or '-created',
    )
    return flask.render_template(
        'project/project_view.html',
        html_class='project-view',
        title=project_db.name,
        project_db=project_db,
        crash_dbs=crash_dbs,
        next_url=util.generate_next_url(crash_cursor),
        api_url=flask.url_for('api.project', project_key=project_db.key.urlsafe() if project_db.key else ''),
    )
###############################################################################
# Admin List
###############################################################################
@app.route('/admin/project/')
@auth.admin_required
def admin_project_list():
    """Admin view: list every project, newest modification first by default."""
    project_dbs, project_cursor = model.Project.get_dbs(
        order=util.param('order') or '-modified',
    )
    return flask.render_template(
        'project/admin_project_list.html',
        html_class='admin-project-list',
        title='Projects',
        project_dbs=project_dbs,
        next_url=util.generate_next_url(project_cursor),
        api_url=flask.url_for('api.admin.project.list'),
    )
###############################################################################
# Admin Update
###############################################################################
class ProjectUpdateAdminForm(ProjectUpdateForm):
    """Admin variant of the project form; currently identical to the user
    form but kept separate so admin-only fields can be added later."""
    pass
@app.route('/admin/project/create/', methods=['GET', 'POST'])
@app.route('/admin/project/<int:project_id>/update/', methods=['GET', 'POST'])
@auth.admin_required
def admin_project_update(project_id=0):
    """Admin create/update of any project (no ownership check, unlike the
    user-facing project_update view)."""
    if project_id:
        project_db = model.Project.get_by_id(project_id)
    else:
        project_db = model.Project(user_key=auth.current_user_key())
    if not project_db:
        flask.abort(404)
    form = ProjectUpdateAdminForm(obj=project_db)
    if form.validate_on_submit():
        form.populate_obj(project_db)
        project_db.put()
        return flask.redirect(flask.url_for('admin_project_list', order='-modified'))
    return flask.render_template(
        'project/admin_project_update.html',
        title=project_db.name,
        html_class='admin-project-update',
        form=form,
        project_db=project_db,
        back_url_for='admin_project_list',
        api_url=flask.url_for('api.admin.project', project_key=project_db.key.urlsafe() if project_db.key else ''),
    )
| mit | b4acd5d9accfe04b3e288e852829d880 | 29.651613 | 111 | 0.57651 | 3.794728 | false | false | false | false |
lipis/electron-crash-reporter | main/control/profile.py | 1 | 3627 | # coding: utf-8
from flask.ext import wtf
import flask
import wtforms
import auth
import config
import model
import util
import task
from main import app
###############################################################################
# Profile View
###############################################################################
@app.route('/profile/')
@auth.login_required
def profile():
    """Render the signed-in user's profile page."""
    user_db = auth.current_user_db()
    return flask.render_template(
        'profile/profile.html',
        title=user_db.name,
        html_class='profile-view',
        user_db=user_db,
    )
###############################################################################
# Profile Update
###############################################################################
class ProfileUpdateForm(wtf.Form):
    """Form for editing the signed-in user's display name and email."""
    name = wtforms.StringField(
        model.User.name._verbose_name,
        [wtforms.validators.required()], filters=[util.strip_filter],
    )
    email = wtforms.StringField(
        model.User.email._verbose_name,
        [wtforms.validators.optional(), wtforms.validators.email()],
        filters=[util.email_filter],
    )
@app.route('/profile/update/', methods=['GET', 'POST'])
@auth.login_required
def profile_update():
    """Update the current user's name/email; a changed email resets the
    verified flag and triggers a verification notification."""
    user_db = auth.current_user_db()
    form = ProfileUpdateForm(obj=user_db)
    if form.validate_on_submit():
        email = form.email.data
        if email and not user_db.is_email_available(email, user_db.key):
            form.email.errors.append('This email is already taken.')
        if not form.errors:
            # Re-verify when there is no token yet or the address changed.
            send_verification = not user_db.token or user_db.email != email
            form.populate_obj(user_db)
            if send_verification:
                user_db.verified = False
                task.verify_email_notification(user_db)
            user_db.put()
            return flask.redirect(flask.url_for('profile'))
    return flask.render_template(
        'profile/profile_update.html',
        title=user_db.name,
        html_class='profile-update',
        form=form,
        user_db=user_db,
    )
###############################################################################
# Profile Password
###############################################################################
class ProfilePasswordForm(wtf.Form):
    """Form for changing the account password.

    old_password is optional because users created via OAuth may not have a
    password yet; the view enforces the correct combination.
    """
    old_password = wtforms.StringField(
        'Old Password', [wtforms.validators.optional()],
    )
    new_password = wtforms.StringField(
        'New Password',
        [wtforms.validators.required(), wtforms.validators.length(min=6)]
    )
@app.route('/profile/password/', methods=['GET', 'POST'])
@auth.login_required
def profile_password():
    """Change the current user's password.

    Requires the correct old password when one is already set; 418s when
    email/password authentication is disabled in the config.
    """
    if not config.CONFIG_DB.has_email_authentication:
        flask.abort(418)
    user_db = auth.current_user_db()
    form = ProfilePasswordForm(obj=user_db)
    if form.validate_on_submit():
        errors = False
        old_password = form.old_password.data
        new_password = form.new_password.data
        if new_password or old_password:
            if user_db.password_hash:
                # An existing password must be confirmed before replacement.
                if util.password_hash(user_db, old_password) != user_db.password_hash:
                    form.old_password.errors.append('Invalid current password')
                    errors = True
            if not errors and old_password and not new_password:
                form.new_password.errors.append('This field is required.')
                errors = True
            if not (form.errors or errors):
                user_db.password_hash = util.password_hash(user_db, new_password)
                flask.flash('Your password has been changed.', category='success')
        if not (form.errors or errors):
            user_db.put()
            return flask.redirect(flask.url_for('profile'))
    return flask.render_template(
        'profile/profile_password.html',
        title=user_db.name,
        html_class='profile-password',
        form=form,
        user_db=user_db,
    )
| mit | 3e0156f96e99d63826a222aca4f0e29c | 28.25 | 79 | 0.590019 | 3.972618 | false | false | false | false |
dgilland/pydash | src/pydash/arrays.py | 1 | 59046 | """
Functions that operate on lists.
.. versionadded:: 1.0.0
"""
from bisect import bisect_left, bisect_right
from functools import cmp_to_key
from math import ceil
import pydash as pyd
from .helpers import base_get, iteriteratee, parse_iteratee
__all__ = (
"chunk",
"compact",
"concat",
"difference",
"difference_by",
"difference_with",
"drop",
"drop_right",
"drop_right_while",
"drop_while",
"duplicates",
"fill",
"find_index",
"find_last_index",
"flatten",
"flatten_deep",
"flatten_depth",
"from_pairs",
"head",
"index_of",
"initial",
"intercalate",
"interleave",
"intersection",
"intersection_by",
"intersection_with",
"intersperse",
"last",
"last_index_of",
"mapcat",
"nth",
"pull",
"pull_all",
"pull_all_by",
"pull_all_with",
"pull_at",
"push",
"remove",
"reverse",
"shift",
"slice_",
"sort",
"sorted_index",
"sorted_index_by",
"sorted_index_of",
"sorted_last_index",
"sorted_last_index_by",
"sorted_last_index_of",
"sorted_uniq",
"sorted_uniq_by",
"splice",
"split_at",
"tail",
"take",
"take_right",
"take_right_while",
"take_while",
"union",
"union_by",
"union_with",
"uniq",
"uniq_by",
"uniq_with",
"unshift",
"unzip",
"unzip_with",
"without",
"xor",
"xor_by",
"xor_with",
"zip_",
"zip_object",
"zip_object_deep",
"zip_with",
)
def chunk(array, size=1):
    """
    Split `array` into groups of length `size`; the final group holds
    whatever elements remain when the split is uneven.

    Args:
        array (list): List to chunk.
        size (int, optional): Chunk size. Defaults to ``1``.

    Returns:
        list: New list containing chunks of `array`.

    Example:

        >>> chunk([1, 2, 3, 4, 5], 2)
        [[1, 2], [3, 4], [5]]

    .. versionadded:: 1.1.0
    """
    total_chunks = int(ceil(len(array) / float(size)))
    result = []
    for index in range(total_chunks):
        result.append(array[index * size : (index + 1) * size])
    return result
def compact(array):
    """
    Return a new list with every falsey value of `array` removed.

    Args:
        array (list): List to compact.

    Returns:
        list: Compacted list.

    Example:

        >>> compact(['', 1, 0, True, False, None])
        [1, True]

    .. versionadded:: 1.0.0
    """
    # filter(None, ...) keeps exactly the truthy elements.
    return list(filter(None, array))
def concat(*arrays):
    """
    Concatenates zero or more lists into one.

    Args:
        arrays (list): Lists to concatenate.

    Returns:
        list: Concatenated list.

    Example:

        >>> concat([1, 2], [3, 4], [[5], [6]])
        [1, 2, 3, 4, [5], [6]]

    .. versionadded:: 2.0.0

    .. versionchanged:: 4.0.0
        Renamed from ``cat`` to ``concat``.
    """
    # One shallow flatten of the argument tuple joins the lists without
    # recursing into nested sub-lists (see the example).
    return flatten(arrays)
def difference(array, *others):
    """
    Creates a list of list elements not present in others.

    Args:
        array (list): List to process.
        others (list): Lists to check.

    Returns:
        list: Difference between `others`.

    Example:

        >>> difference([1, 2, 3], [1], [2])
        [3]

    .. versionadded:: 1.0.0
    """
    # Delegates with no comparator, so difference_with falls back to its
    # documented default equality comparison (is_equal).
    return difference_with(array, *others)
def difference_by(array, *others, **kwargs):
    """
    Like :func:`difference` except that an iteratee may be given (as the
    trailing positional argument or the ``iteratee`` keyword) which is
    invoked on each element of each array to generate the comparison
    criterion. The order and references of result values are determined by
    `array`. The iteratee is invoked with one argument: ``(value)``.

    Args:
        array (list): The array to find the difference of.
        others (list): Lists to check for difference with `array`.

    Keyword Args:
        iteratee (mixed, optional): Function to transform the elements of the arrays. Defaults to
            :func:`.identity`.

    Returns:
        list: Difference between `others`.

    Example:

        >>> difference_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], round)
        [1.5, 1.7]

    .. versionadded:: 4.0.0
    """
    result = array[:]
    if not others:
        return result
    # The trailing positional argument may actually be the iteratee.
    iteratee, others = parse_iteratee("iteratee", *others, **kwargs)
    for other in others:
        if not other:
            continue
        result = list(iterdifference(result, other, iteratee=iteratee))
    return result
def difference_with(array, *others, **kwargs):
    """
    Like :func:`difference` except that a comparator may be given (as the
    trailing positional argument or the ``comparator`` keyword) which is
    invoked to compare the elements of all arrays. The order and references
    of result values are determined by the first array. The comparator is
    invoked with two arguments: ``(arr_val, oth_val)``.

    Args:
        array (list): The array to find the difference of.
        others (list): Lists to check for difference with `array`.

    Keyword Args:
        comparator (callable, optional): Function to compare the elements of the arrays. Defaults to
            :func:`.is_equal`.

    Returns:
        list: Difference between `others`.

    Example:

        >>> array = ['apple', 'banana', 'pear']
        >>> others = (['avocado', 'pumpkin'], ['peach'])
        >>> comparator = lambda a, b: a[0] == b[0]
        >>> difference_with(array, *others, comparator=comparator)
        ['banana']

    .. versionadded:: 4.0.0
    """
    result = array[:]
    if not others:
        return result
    comparator = kwargs.get("comparator")
    if comparator is None:
        # The trailing positional argument may actually be the comparator.
        candidate = others[-1]
        if callable(candidate) or candidate is None:
            comparator = candidate
            others = others[:-1]
    for other in others:
        if not other:
            continue
        result = list(iterdifference(result, other, comparator=comparator))
    return result
def drop(array, n=1):
    """
    Creates a slice of `array` with `n` elements dropped from the beginning.

    Args:
        array (list): List to process.
        n (int, optional): Number of elements to drop. Defaults to ``1``.

    Returns:
        list: Dropped list.

    Example:

        >>> drop([1, 2, 3, 4], 2)
        [3, 4]

    .. versionadded:: 1.0.0

    .. versionchanged:: 1.1.0
        Added ``n`` argument and removed as alias of :func:`rest`.

    .. versionchanged:: 3.0.0
        Made ``n`` default to ``1``.
    """
    # Dropping while ``index < n`` removes exactly the first ``n`` items; for
    # a negative ``n`` the predicate is immediately false, so a copy of the
    # whole list is returned.
    return drop_while(array, lambda _, index: index < n)
def drop_right(array, n=1):
    """
    Creates a slice of `array` with `n` elements dropped from the end.

    Args:
        array (list): List to process.
        n (int, optional): Number of elements to drop. Defaults to ``1``.

    Returns:
        list: Dropped list.

    Example:

        >>> drop_right([1, 2, 3, 4], 2)
        [1, 2]

    .. versionadded:: 1.1.0

    .. versionchanged:: 3.0.0
        Made ``n`` default to ``1``.
    """
    length = len(array)
    # ``(length - index) <= n`` holds exactly for the last ``n`` indices.
    return drop_right_while(array, lambda _, index: (length - index) <= n)
def drop_right_while(array, predicate=None):
    """
    Creates a slice of `array` excluding elements dropped from the end.
    Elements are dropped until the `predicate` returns falsey. The
    `predicate` is invoked with three arguments: ``(value, index, array)``.

    Args:
        array (list): List to process.
        predicate (mixed): Predicate called per iteration

    Returns:
        list: Dropped list.

    Example:

        >>> drop_right_while([1, 2, 3, 4], lambda x: x >= 3)
        [1, 2]

    .. versionadded:: 1.1.0
    """
    # Walk from the right, shrinking the kept prefix while the predicate
    # stays truthy.
    keep = len(array)
    for is_true, _, _, _ in iteriteratee(array, predicate, reverse=True):
        if not is_true:
            break
        keep -= 1
    return array[:keep]
def drop_while(array, predicate=None):
    """
    Creates a slice of `array` excluding elements dropped from the
    beginning. Elements are dropped until the `predicate` returns falsey.
    The `predicate` is invoked with three arguments: ``(value, index,
    array)``.

    Args:
        array (list): List to process.
        predicate (mixed): Predicate called per iteration

    Returns:
        list: Dropped list.

    Example:

        >>> drop_while([1, 2, 3, 4], lambda x: x < 3)
        [3, 4]

    .. versionadded:: 1.1.0
    """
    # Count the length of the truthy prefix, then slice it off.
    start = 0
    for is_true, _, _, _ in iteriteratee(array, predicate):
        if not is_true:
            break
        start += 1
    return array[start:]
def duplicates(array, iteratee=None):
    """
    Creates a unique list of duplicate values from `array`. If iteratee is
    passed, each element of array is passed through a iteratee before
    duplicates are computed. The iteratee is invoked with three arguments:
    ``(value, index, array)``. If an object path is passed for iteratee, the
    created iteratee will return the path value of the given element. If an
    object is passed for iteratee, the created filter style iteratee will
    return ``True`` for elements that have the properties of the given
    object, else ``False``.

    Args:
        array (list): List to process.
        iteratee (mixed, optional): Iteratee applied per iteration.

    Returns:
        list: List of duplicates.

    Example:

        >>> duplicates([0, 1, 3, 2, 3, 1])
        [3, 1]

    .. versionadded:: 3.0.0
    """
    if iteratee:
        transform = pyd.iteratee(iteratee)
        computed = list(map(transform, array))
    else:
        computed = array
    # Index back into the original array: the iteratee may have transformed
    # the compared values, but the result must contain the originals.
    return uniq(array[i] for i, _ in iterduplicates(computed))
def fill(array, value, start=0, end=None):
    """
    Fill elements of `array` with `value` from `start` up to, but not
    including, `end`.

    Args:
        array (list): List to fill.
        value (mixed): Value to fill with.
        start (int, optional): Index to start filling. Defaults to ``0``.
        end (int, optional): Index to end filling. Defaults to ``len(array)``.

    Returns:
        list: Filled `array` (the same object that was passed in).

    Example:

        >>> fill([1, 2, 3, 4, 5], 0)
        [0, 0, 0, 0, 0]
        >>> fill([1, 2, 3, 4, 5], 0, 1, 3)
        [1, 0, 0, 4, 5]
        >>> fill([1, 2, 3, 4, 5], 0, 0, 100)
        [0, 0, 0, 0, 0]

    Warning:
        `array` is modified in place.

    .. versionadded:: 3.1.0
    """
    # Clamp the end index so an oversized ``end`` simply fills to the tail.
    stop = len(array) if end is None else min(end, len(array))
    # len(array[start:stop]) follows normal slicing semantics, so the filler
    # segment always has exactly the right length.
    filler = [value] * len(array[start:stop])
    # Full-slice assignment mutates the caller's list instead of rebinding.
    array[:] = array[:start] + filler + array[stop:]
    return array
def find_index(array, predicate=None):
    """
    Similar to :func:`pydash.collections.find`, except that it returns the
    index of the element that passes the predicate check, instead of the
    element itself.

    Args:
        array (list): List to process.
        predicate (mixed, optional): Predicate applied per iteration.

    Returns:
        int: Index of found item or ``-1`` if not found.

    Example:

        >>> find_index([1, 2, 3, 4], lambda x: x >= 3)
        2
        >>> find_index([1, 2, 3, 4], lambda x: x > 4)
        -1

    .. versionadded:: 1.0.0
    """
    for is_true, _, index, _ in iteriteratee(array, predicate):
        if is_true:
            return index
    return -1
def find_last_index(array, predicate=None):
    """
    Similar to :func:`find_index`, except that it iterates over elements
    from right to left.

    Args:
        array (list): List to process.
        predicate (mixed, optional): Predicate applied per iteration.

    Returns:
        int: Index of found item or ``-1`` if not found.

    Example:

        >>> find_last_index([1, 2, 3, 4], lambda x: x >= 3)
        3
        >>> find_last_index([1, 2, 3, 4], lambda x: x > 4)
        -1

    .. versionadded:: 1.0.0
    """
    for is_true, _, index, _ in iteriteratee(array, predicate, reverse=True):
        if is_true:
            return index
    return -1
def flatten(array):
    """
    Flattens array a single level deep.

    Args:
        array (list): List to flatten.

    Returns:
        list: Flattened list.

    Example:

        >>> flatten([[1], [2, [3]], [[4]]])
        [1, 2, [3], [4]]

    .. versionadded:: 1.0.0

    .. versionchanged:: 2.0.0
        Removed `callback` option. Added ``is_deep`` option. Made it shallow
        by default.

    .. versionchanged:: 4.0.0
        Removed ``is_deep`` option. Use :func:`flatten_deep` instead.
    """
    # depth=1 == exactly one level of flattening.
    return flatten_depth(array, depth=1)
def flatten_deep(array):
    """
    Flattens an array recursively.

    Args:
        array (list): List to flatten.

    Returns:
        list: Flattened list.

    Example:

        >>> flatten_deep([[1], [2, [3]], [[4]]])
        [1, 2, 3, 4]

    .. versionadded:: 2.0.0
    """
    # A depth of -1 is treated as "unlimited" by the flattening
    # implementation, giving full recursive flattening.
    return flatten_depth(array, depth=-1)
def flatten_depth(array, depth=1):
    """
    Recursively flatten `array` up to `depth` times.

    Args:
        array (list): List to flatten.
        depth (int, optional): Depth to flatten to. Defaults to ``1``.

    Returns:
        list: Flattened list.

    Example:

        >>> flatten_depth([[[1], [2, [3]], [[4]]]], 1)
        [[1], [2, [3]], [[4]]]
        >>> flatten_depth([[[1], [2, [3]], [[4]]]], 2)
        [1, 2, [3], [4]]
        >>> flatten_depth([[[1], [2, [3]], [[4]]]], 3)
        [1, 2, 3, 4]
        >>> flatten_depth([[[1], [2, [3]], [[4]]]], 4)
        [1, 2, 3, 4]

    .. versionadded:: 4.0.0
    """
    # iterflatten is a generator; materialize it for the caller.
    return list(iterflatten(array, depth=depth))
def from_pairs(pairs):
    """
    Returns a dict from the given list of pairs.

    Args:
        pairs (list): List of key-value pairs.

    Returns:
        dict

    Example:

        >>> from_pairs([['a', 1], ['b', 2]]) == {'a': 1, 'b': 2}
        True

    .. versionadded:: 4.0.0
    """
    # dict() natively accepts an iterable of 2-item pairs.
    return dict(pairs)
def head(array):
    """
    Return the first element of `array`.

    Args:
        array (list): List to process.

    Returns:
        mixed: First element of list.

    Example:

        >>> head([1, 2, 3, 4])
        1

    .. versionadded:: 1.0.0

    .. versionchanged::
        Renamed from ``first`` to ``head``.
    """
    # base_get with default=None presumably returns the default instead of
    # raising on an empty list -- confirm against helpers.base_get.
    return base_get(array, 0, default=None)
def index_of(array, value, from_index=0):
    """
    Gets the index at which the first occurrence of value is found.

    Args:
        array (list): List to search.
        value (mixed): Value to search for.
        from_index (int, optional): Index to search from.

    Returns:
        int: Index of found item or ``-1`` if not found.

    Example:

        >>> index_of([1, 2, 3, 4], 2)
        1
        >>> index_of([2, 1, 2, 3], 2, from_index=1)
        2

    .. versionadded:: 1.0.0
    """
    # EAFP: list.index raises ValueError when the value is absent.
    try:
        position = array.index(value, from_index)
    except ValueError:
        position = -1
    return position
def initial(array):
    """
    Return all but the last element of `array`.

    Args:
        array (list): List to process.

    Returns:
        list: Initial part of `array`.

    Example:

        >>> initial([1, 2, 3, 4])
        [1, 2, 3]

    .. versionadded:: 1.0.0
    """
    # Slicing returns an empty list (not an error) for empty input.
    return array[:-1]
def intercalate(array, separator):
    """
    Insert `separator` between elements of `array` and shallowly flatten the result
    (:func:`intersperse` followed by one level of :func:`flatten`).
    Args:
        array (list): List to intercalate.
        separator (mixed): Element to insert.
    Returns:
        list: Intercalated list.
    Example:
        >>> intercalate([1, [2], [3], 4], 'x')
        [1, 'x', 2, 'x', 3, 'x', 4]
    .. versionadded:: 2.0.0
    """
    separated = intersperse(array, separator)
    return flatten(separated)
def interleave(*arrays):
    """
    Merge the given lists round-robin: take the next element from each list in turn
    until all lists are exhausted.
    Args:
        arrays (list): Lists to interleave.
    Returns:
        list: Interleaved list.
    Example:
        >>> interleave([1, 2, 3], [4, 5, 6], [7, 8, 9])
        [1, 4, 7, 2, 5, 8, 3, 6, 9]
    .. versionadded:: 2.0.0
    """
    # Drain the round-robin generator into a list.
    return [item for item in iterinterleave(*arrays)]
def intersection(array, *others):
    """
    Compute the intersection of all the passed-in arrays, preserving the
    order of the first array.
    Args:
        array (list): The array to find the intersection of.
        others (list): Lists to check for intersection with `array`.
    Returns:
        list: Intersection of provided lists.
    Example:
        >>> intersection([1, 2, 3], [1, 2, 3, 4, 5], [2, 3])
        [2, 3]
        >>> intersection([1, 2, 3])
        [1, 2, 3]
    .. versionadded:: 1.0.0
    .. versionchanged:: 4.0.0
        Support finding intersection of unhashable types.
    """
    # Delegate to the comparator-based variant using the default equality check.
    return intersection_with(array, *others)
def intersection_by(array, *others, **kwargs):
    """
    Like :func:`intersection` except that each element is passed through `iteratee`
    to produce the value used for comparison. Order and references of the result
    are determined by `array`. The iteratee is invoked with one argument: ``(value)``.
    Args:
        array (list): The array to find the intersection of.
        others (list): Lists to check for intersection with `array`.
    Keyword Args:
        iteratee (mixed, optional): Function to transform the elements of the arrays. Defaults to
            :func:`.identity`.
    Returns:
        list: Intersection of provided lists.
    Example:
        >>> intersection_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], round)
        [1.2, 2.8]
    .. versionadded:: 4.0.0
    """
    result = array[:]
    if not others:
        return result
    iteratee, rest = parse_iteratee("iteratee", *others, **kwargs)
    # Process the shortest lists first so the working set shrinks quickly.
    for other in sorted(rest, key=len):
        result = list(iterintersection(result, other, iteratee=iteratee))
        if not result:
            break
    return result
def intersection_with(array, *others, **kwargs):
    """
    Like :func:`intersection` except that elements are matched using `comparator`.
    Order and references of the result are determined by the first array. The
    comparator is invoked with two arguments: ``(arr_val, oth_val)``.
    Args:
        array (list): The array to find the intersection of.
        others (list): Lists to check for intersection with `array`.
    Keyword Args:
        comparator (callable, optional): Function to compare the elements of the arrays. Defaults to
            :func:`.is_equal`.
    Returns:
        list: Intersection of provided lists.
    Example:
        >>> array = ['apple', 'banana', 'pear']
        >>> others = (['avocado', 'pumpkin'], ['peach'])
        >>> comparator = lambda a, b: a[0] == b[0]
        >>> intersection_with(array, *others, comparator=comparator)
        ['pear']
    .. versionadded:: 4.0.0
    """
    result = array[:]
    if not others:
        return result
    comparator, rest = parse_iteratee("comparator", *others, **kwargs)
    # Process the shortest lists first so the working set shrinks quickly.
    for other in sorted(rest, key=len):
        result = list(iterintersection(result, other, comparator=comparator))
        if not result:
            break
    return result
def intersperse(array, separator):
    """
    Place `separator` between each pair of adjacent elements of `array`.
    Args:
        array (list): List to intersperse.
        separator (mixed): Element to insert.
    Returns:
        list: Interspersed list.
    Example:
        >>> intersperse([1, [2], [3], 4], 'x')
        [1, 'x', [2], 'x', [3], 'x', 4]
    .. versionadded:: 2.0.0
    """
    # Drain the interspersing generator into a list.
    return [item for item in iterintersperse(array, separator)]
def last(array):
    """
    Return the last element of `array`, or ``None`` when `array` is empty.
    Args:
        array (list): List to process.
    Returns:
        mixed: Last part of `array`.
    Example:
        >>> last([1, 2, 3, 4])
        4
    .. versionadded:: 1.0.0
    """
    # base_get supplies the None fallback when the array is empty.
    last_item = base_get(array, -1, default=None)
    return last_item
def last_index_of(array, value, from_index=None):
    """
    Gets the index at which the last occurrence of value is found.
    Args:
        array (list): List to search.
        value (mixed): Value to search for.
        from_index (int, optional): Index to search from (backwards). Negative
            values count from the end of the list.
    Returns:
        int: Index of found item or ``-1`` if not found.
    Example:
        >>> last_index_of([1, 2, 2, 4], 2)
        2
        >>> last_index_of([1, 2, 2, 4], 2, from_index=1)
        1
        >>> last_index_of([1, 2, 3], 1)
        0
    .. versionadded:: 1.0.0
    """
    array_len = len(array)
    # Default: scan backwards starting at the last element.
    index = array_len - 1
    try:
        from_index = int(from_index)
    except (TypeError, ValueError):
        pass
    else:
        # Negative offsets count from the end; positive offsets are clamped to
        # the last valid index. An out-of-range start simply yields no matches.
        index = array_len + from_index if from_index < 0 else min(from_index, array_len - 1)
    # Inclusive of index 0; the previous ``while index:`` loop stopped before
    # checking position 0, so a match at the head of the list was never found.
    while index >= 0:
        if array[index] == value:
            return index
        index -= 1
    return -1
def mapcat(array, iteratee=None):
    """
    Apply `iteratee` to every element of `array` and concatenate all of the
    resulting lists into one list via :func:`concat`.
    Args:
        array (list): List to map and concatenate.
        iteratee (mixed): Iteratee to apply to each element.
    Returns:
        list: Mapped and concatenated list.
    Example:
        >>> mapcat(range(4), lambda x: list(range(x)))
        [0, 0, 1, 0, 1, 2]
    .. versionadded:: 2.0.0
    """
    mapped = pyd.map_(array, iteratee)
    return concat(*mapped)
def nth(array, pos=0):
    """
    Return the element of `array` at index `pos`.
    Args:
        array (list): List passed in by the user.
        pos (int): Index of element to return.
    Returns:
        mixed: Returns the element at :attr:`pos`.
    Example:
        >>> nth([1, 2, 3], 0)
        1
        >>> nth([3, 4, 5, 6], 2)
        5
        >>> nth([11, 22, 33], -1)
        33
        >>> nth([11, 22, 33])
        11
    .. versionadded:: 4.0.0
    """
    # pyd.get handles negative positions and missing indexes.
    element = pyd.get(array, pos)
    return element
def pop(array, index=-1):
    """
    Remove and return the element of `array` at `index`.
    Args:
        array (list): List to pop from.
        index (int, optional): Index to remove element from. Defaults to ``-1``.
    Returns:
        mixed: Value at `index`.
    Warning:
        `array` is modified in place.
    Example:
        >>> array = [1, 2, 3, 4]
        >>> item = pop(array)
        >>> item
        4
        >>> array
        [1, 2, 3]
        >>> item = pop(array, index=0)
        >>> item
        1
        >>> array
        [2, 3]
    .. versionadded:: 2.2.0
    """
    removed = array.pop(index)
    return removed
def pull(array, *values):
    """
    Remove every occurrence of each of `values` from `array`.
    Args:
        array (list): List to pull from.
        values (mixed): Values to remove.
    Returns:
        list: Modified `array`.
    Warning:
        `array` is modified in place.
    Example:
        >>> pull([1, 2, 2, 3, 3, 4], 2, 3)
        [1, 4]
    .. versionadded:: 1.0.0
    .. versionchanged:: 4.0.0
        :func:`pull` method now calls :func:`pull_all` method for the desired
        functionality.
    """
    # Thin variadic wrapper around pull_all.
    return pull_all(array, values)
def pull_all(array, values):
    """
    Remove every occurrence of each element of `values` from `array`.
    Args:
        array (list): Array to modify.
        values (list): Values to remove.
    Returns:
        list: Modified `array`.
    Example:
        >>> pull_all([1, 2, 2, 3, 3, 4], [2, 3])
        [1, 4]
    .. versionadded:: 4.0.0
    """
    # Slice-assign so the caller's list object is mutated, not rebound.
    array[:] = [item for item in array if item not in values]
    return array
def pull_all_by(array, values, iteratee=None):
    """
    Like :func:`pull_all` except that each element of `array` and `values` is
    passed through `iteratee` to produce the value used for comparison. The
    iteratee is invoked with one argument: ``(value)``.
    Args:
        array (list): Array to modify.
        values (list): Values to remove.
        iteratee (mixed, optional): Function to transform the elements of the arrays. Defaults to
            :func:`.identity`.
    Returns:
        list: Modified `array`.
    Example:
        >>> array = [{'x': 1}, {'x': 2}, {'x': 3}, {'x': 1}]
        >>> pull_all_by(array, [{'x': 1}, {'x': 3}], 'x')
        [{'x': 2}]
    .. versionadded:: 4.0.0
    """
    # Translate the iteratee-matched values back into concrete array elements,
    # then remove those by plain equality.
    survivors = difference_by(array, values, iteratee=iteratee)
    to_remove = difference(array, survivors)
    return pull_all(array, to_remove)
def pull_all_with(array, values, comparator=None):
    """
    Like :func:`pull_all` except that `comparator` decides which elements of
    `array` match `values`. The comparator is invoked with two arguments:
    ``(arr_val, oth_val)``.
    Args:
        array (list): Array to modify.
        values (list): Values to remove.
        comparator (callable, optional): Function to compare the elements of the arrays. Defaults to
            :func:`.is_equal`.
    Returns:
        list: Modified `array`.
    Example:
        >>> array = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}, {'x': 5, 'y': 6}]
        >>> res = pull_all_with(array, [{'x': 3, 'y': 4}], lambda a, b: a == b)
        >>> res == [{'x': 1, 'y': 2}, {'x': 5, 'y': 6}]
        True
        >>> array = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}, {'x': 5, 'y': 6}]
        >>> res = pull_all_with(array, [{'x': 3, 'y': 4}], lambda a, b: a != b)
        >>> res == [{'x': 3, 'y': 4}]
        True
    .. versionadded:: 4.0.0
    """
    # Translate the comparator-matched values back into concrete array elements,
    # then remove those by plain equality.
    survivors = difference_with(array, values, comparator=comparator)
    to_remove = difference(array, survivors)
    return pull_all(array, to_remove)
def pull_at(array, *indexes):
    """
    Remove the elements of `array` at the given `indexes` and return the
    modified array. Indexes may be given as a list or as individual arguments.
    Args:
        array (list): List to pull from.
        indexes (int): Indexes to pull.
    Returns:
        list: Modified `array`.
    Warning:
        `array` is modified in place.
    Example:
        >>> pull_at([1, 2, 3, 4], 0, 2)
        [2, 4]
    .. versionadded:: 1.1.0
    """
    # Delete from the highest index down so earlier deletions don't shift
    # the positions of the remaining targets.
    for position in sorted(flatten(indexes), reverse=True):
        del array[position]
    return array
def push(array, *items):
    """
    Append `items` to the end of `array` and return the modified `array`.
    Args:
        array (list): List to push to.
        items (mixed): Items to append.
    Returns:
        list: Modified `array`.
    Warning:
        `array` is modified in place.
    Example:
        >>> array = [1, 2, 3]
        >>> push(array, 4, 5, [6])
        [1, 2, 3, 4, 5, [6]]
    .. versionadded:: 2.2.0
    .. versionchanged:: 4.0.0
        Removed alias ``append``.
    """
    # extend() appends each item in order, same as repeated append().
    array.extend(items)
    return array
def remove(array, predicate=None):
    """
    Remove all elements of `array` for which `predicate` returns truthy and
    return the removed elements.
    Args:
        array (list): List to remove elements from.
        predicate (mixed, optional): Predicate applied per iteration.
    Returns:
        list: Removed elements of `array`.
    Warning:
        `array` is modified in place.
    Example:
        >>> array = [1, 2, 3, 4]
        >>> items = remove(array, lambda x: x >= 3)
        >>> items
        [3, 4]
        >>> array
        [1, 2]
    .. versionadded:: 1.0.0
    """
    removed = []
    kept = []
    # Partition elements into removed/kept depending on the predicate result.
    for matches, _, idx, _ in iteriteratee(array, predicate):
        (removed if matches else kept).append(array[idx])
    # Slice-assign so the caller's list object is mutated in place.
    array[:] = kept
    return removed
def reverse(array):
    """
    Return `array` with its elements in reverse order.
    Args:
        array (list|string): Object to process.
    Returns:
        list|string: Reverse of object.
    Example:
        >>> reverse([1, 2, 3, 4])
        [4, 3, 2, 1]
    .. versionadded:: 2.2.0
    """
    # A negative-step slice reverses both lists and strings without mutating
    # the original object.
    reversed_obj = array[::-1]
    return reversed_obj
def shift(array):
    """
    Remove and return the first element of `array`.
    Args:
        array (list): List to shift.
    Returns:
        mixed: First element of `array`.
    Warning:
        `array` is modified in place.
    Example:
        >>> array = [1, 2, 3, 4]
        >>> item = shift(array)
        >>> item
        1
        >>> array
        [2, 3, 4]
    .. versionadded:: 2.2.0
    """
    return array.pop(0)
def slice_(array, start=0, end=None):
    """
    Slice `array` from `start` up to, but not including, `end`. When `end` is
    omitted, only the single element at `start` is selected.
    Args:
        array (list): Array to slice.
        start (int, optional): Start index. Defaults to ``0``.
        end (int, optional): End index. Defaults to selecting the value at ``start`` index.
    Returns:
        list: Sliced list.
    Example:
        >>> slice_([1, 2, 3, 4])
        [1]
        >>> slice_([1, 2, 3, 4], 1)
        [2]
        >>> slice_([1, 2, 3, 4], 1, 3)
        [2, 3]
    .. versionadded:: 1.1.0
    """
    if end is not None:
        return array[start:end]
    # Select just the element at `start`; a negative start needs the stop
    # index translated into absolute terms so the slice stays one wide.
    stop = start + 1 if start >= 0 else len(array) + start + 1
    return array[start:stop]
def sort(array, comparator=None, key=None, reverse=False):
    """
    Sort `array` in place and return it, using either a `comparator` or a `key`
    function plus an optional `reverse` flag.
    Note:
        Python 3 removed comparator-based sorting; a given `comparator` is
        therefore converted to a key function with ``functools.cmp_to_key``.
    Args:
        array (list): List to sort.
        comparator (callable, optional): A custom comparator function used to sort the list.
            Function should accept two arguments and return a negative, zero, or position number
            depending on whether the first argument is considered smaller than, equal to, or larger
            than the second argument. Defaults to ``None``. This argument is mutually exclusive with
            `key`.
        key (iteratee, optional): A function of one argument used to extract a comparator key from
            each list element. Defaults to ``None``. This argument is mutually exclusive with
            `comparator`.
        reverse (bool, optional): Whether to reverse the sort. Defaults to ``False``.
    Returns:
        list: Sorted list.
    Warning:
        `array` is modified in place.
    Example:
        >>> sort([2, 1, 4, 3])
        [1, 2, 3, 4]
        >>> sort([2, 1, 4, 3], reverse=True)
        [4, 3, 2, 1]
        >>> results = sort([{'a': 2, 'b': 1},\
                            {'a': 3, 'b': 2},\
                            {'a': 0, 'b': 3}],\
                           key=lambda item: item['a'])
        >>> assert results == [{'a': 0, 'b': 3},\
                               {'a': 2, 'b': 1},\
                               {'a': 3, 'b': 2}]
    .. versionadded:: 2.2.0
    """
    if comparator:
        if key:
            raise ValueError('The "comparator" and "key" arguments are mutually exclusive')
        # Adapt the two-argument comparator into a key function.
        key = cmp_to_key(comparator)
    array.sort(key=key, reverse=reverse)
    return array
def sorted_index(array, value):
    """
    Binary-search for the lowest index at which `value` could be inserted into
    `array` while keeping it sorted.
    Args:
        array (list): List to inspect.
        value (mixed): Value to evaluate.
    Returns:
        int: Returns the index at which `value` should be inserted into `array`.
    Example:
        >>> sorted_index([1, 2, 2, 3, 4], 2)
        1
    .. versionadded:: 1.0.0
    .. versionchanged:: 4.0.0
        Move iteratee support to :func:`sorted_index_by`.
    """
    # Delegate to the iteratee-aware variant with no iteratee.
    return sorted_index_by(array, value)
def sorted_index_by(array, value, iteratee=None):
    """
    Like :func:`sorted_index` except that `value` and every element of `array`
    are passed through `iteratee` to compute their sort ranking. The iteratee
    is invoked with one argument: ``(value)``.
    Args:
        array (list): List to inspect.
        value (mixed): Value to evaluate.
        iteratee (mixed, optional): The iteratee invoked per element. Defaults to :func:`.identity`.
    Returns:
        int: Returns the index at which `value` should be inserted into `array`.
    Example:
        >>> array = [{'x': 4}, {'x': 5}]
        >>> sorted_index_by(array, {'x': 4}, lambda o: o['x'])
        0
        >>> sorted_index_by(array, {'x': 4}, 'x')
        0
    .. versionadded:: 4.0.0
    """
    if iteratee:
        # Bisect over the sorted transformed keys instead of the raw elements.
        transform = pyd.iteratee(iteratee)
        array = sorted(map(transform, array))
        value = transform(value)
    return bisect_left(array, value)
def sorted_index_of(array, value):
    """
    Binary-search the sorted `array` for `value` and return its index, or
    ``-1`` when it is not present.
    Args:
        array (list): Array to inspect.
        value (mixed): Value to search for.
    Returns:
        int: Returns the index of the first matched value, else ``-1``.
    Example:
        >>> sorted_index_of([3, 5, 7, 10], 3)
        0
        >>> sorted_index_of([10, 10, 5, 7, 3], 10)
        -1
    .. versionadded:: 4.0.0
    """
    position = sorted_index(array, value)
    # bisect only yields an insertion point; confirm the element is really there.
    if position < len(array) and array[position] == value:
        return position
    return -1
def sorted_last_index(array, value):
    """
    Like :func:`sorted_index` except that it returns the highest index at which
    `value` could be inserted into `array` while keeping it sorted.
    Args:
        array (list): List to inspect.
        value (mixed): Value to evaluate.
    Returns:
        int: Returns the index at which `value` should be inserted into `array`.
    Example:
        >>> sorted_last_index([1, 2, 2, 3, 4], 2)
        3
    .. versionadded:: 1.1.0
    .. versionchanged:: 4.0.0
        Move iteratee support to :func:`sorted_last_index_by`.
    """
    # Delegate to the iteratee-aware variant with no iteratee.
    return sorted_last_index_by(array, value)
def sorted_last_index_by(array, value, iteratee=None):
    """
    Like :func:`sorted_last_index` except that `value` and every element of
    `array` are passed through `iteratee` to compute their sort ranking. The
    iteratee is invoked with one argument: ``(value)``.
    Args:
        array (list): List to inspect.
        value (mixed): Value to evaluate.
        iteratee (mixed, optional): The iteratee invoked per element. Defaults to :func:`.identity`.
    Returns:
        int: Returns the index at which `value` should be inserted into `array`.
    Example:
        >>> array = [{'x': 4}, {'x': 5}]
        >>> sorted_last_index_by(array, {'x': 4}, lambda o: o['x'])
        1
        >>> sorted_last_index_by(array, {'x': 4}, 'x')
        1
    """
    if iteratee:
        # Bisect over the sorted transformed keys instead of the raw elements.
        transform = pyd.iteratee(iteratee)
        array = sorted(map(transform, array))
        value = transform(value)
    return bisect_right(array, value)
def sorted_last_index_of(array, value):
    """
    This method is like :func:`last_index_of` except that it performs a binary search on a sorted
    `array`.
    Args:
        array (list): Array to inspect.
        value (mixed): Value to search for.
    Returns:
        int: Returns the index of the matched value, else ``-1``.
    Example:
        >>> sorted_last_index_of([4, 5, 5, 5, 6], 5)
        3
        >>> sorted_last_index_of([6, 5, 5, 5, 4], 6)
        -1
    .. versionadded:: 4.0.0
    """
    index = sorted_last_index(array, value) - 1
    # Guard the lower bound too: when the array is empty (or the value would
    # sort before every element) `index` is -1, and ``array[-1]`` would raise
    # IndexError on an empty list or wrap around to the last element.
    if 0 <= index < len(array) and array[index] == value:
        return index
    return -1
def sorted_uniq(array):
    """
    Return the unique elements of `array` in sorted order.
    Args:
        array (list): List of values to be sorted.
    Returns:
        list: List of unique elements in a sorted fashion.
    Example:
        >>> sorted_uniq([4, 2, 2, 5])
        [2, 4, 5]
        >>> sorted_uniq([-2, -2, 4, 1])
        [-2, 1, 4]
    .. versionadded:: 4.0.0
    """
    unique_items = uniq(array)
    return sorted(unique_items)
def sorted_uniq_by(array, iteratee=None):
    """
    Like :func:`sorted_uniq` except that uniqueness is computed on the value
    produced by `iteratee` for each element. The iteratee is invoked with one
    argument: ``(value)``.
    Args:
        array (list): List of values to be sorted.
        iteratee (mixed, optional): Function to transform the elements of the arrays. Defaults to
            :func:`.identity`.
    Returns:
        list: Unique list.
    Example:
        >>> sorted_uniq_by([3, 2, 1, 3, 2, 1], lambda val: val % 2)
        [2, 3]
    .. versionadded:: 4.0.0
    """
    unique_items = uniq_by(array, iteratee=iteratee)
    return sorted(unique_items)
def splice(array, start, count=None, *items):
    """
    Remove `count` elements of `array` starting at index `start` and insert
    `items` in their place.
    Args:
        array (list|str): List to splice.
        start (int): Start to splice at.
        count (int, optional): Number of items to remove starting at `start`. If ``None`` then all
            items after `start` are removed. Defaults to ``None``.
        items (mixed): Elements to insert starting at `start`. Each item is inserted in the order
            given.
    Returns:
        list|str: The removed elements of `array` or the spliced string.
    Warning:
        `array` is modified in place if ``list``.
    Example:
        >>> array = [1, 2, 3, 4]
        >>> splice(array, 1)
        [2, 3, 4]
        >>> array
        [1]
        >>> array = [1, 2, 3, 4]
        >>> splice(array, 1, 2)
        [2, 3]
        >>> array
        [1, 4]
        >>> array = [1, 2, 3, 4]
        >>> splice(array, 1, 2, 0, 0)
        [2, 3]
        >>> array
        [1, 0, 0, 4]
    .. versionadded:: 2.2.0
    .. versionchanged:: 3.0.0
        Support string splicing.
    """
    if count is None:
        # Default: remove everything from `start` to the end.
        count = len(array) - start
    as_string = pyd.is_string(array)
    if as_string:
        # Strings are immutable; work on a character list and re-join below.
        array = list(array)
    stop = start + count
    removed = array[start:stop]
    del array[start:stop]
    # Insert in reverse so the items land at `start` in their given order.
    for item in items[::-1]:
        array.insert(start, item)
    if as_string:
        return "".join(array)
    return removed
def split_at(array, index):
    """
    Split `array` into two lists at `index`; the element at `index` starts the
    second list.
    Args:
        array (list): List to split.
        index (int): Index to split at.
    Returns:
        list: Split list.
    Example:
        >>> split_at([1, 2, 3, 4], 2)
        [[1, 2], [3, 4]]
    .. versionadded:: 2.0.0
    """
    left, right = array[:index], array[index:]
    return [left, right]
def tail(array):
    """
    Return every element of `array` except the first one.
    Args:
        array (list): List to process.
    Returns:
        list: Rest of the list.
    Example:
        >>> tail([1, 2, 3, 4])
        [2, 3, 4]
    .. versionadded:: 1.0.0
    .. versionchanged:: 4.0.0
        Renamed from ``rest`` to ``tail``.
    """
    # Slicing never raises, so an empty input simply yields an empty list.
    rest = array[1:]
    return rest
def take(array, n=1):
    """
    Return a slice of `array` with `n` elements taken from the beginning.
    Args:
        array (list): List to process.
        n (int, optional): Number of elements to take. Defaults to ``1``.
    Returns:
        list: Taken list.
    Example:
        >>> take([1, 2, 3, 4], 2)
        [1, 2]
    .. versionadded:: 1.0.0
    .. versionchanged:: 1.1.0
        Added ``n`` argument and removed as alias of :func:`first`.
    .. versionchanged:: 3.0.0
        Made ``n`` default to ``1``.
    """
    def _within_limit(_, index):
        return index < n

    return take_while(array, _within_limit)
def take_right(array, n=1):
    """
    Return a slice of `array` with `n` elements taken from the end.
    Args:
        array (list): List to process.
        n (int, optional): Number of elements to take. Defaults to ``1``.
    Returns:
        list: Taken list.
    Example:
        >>> take_right([1, 2, 3, 4], 2)
        [3, 4]
    .. versionadded:: 1.1.0
    .. versionchanged:: 3.0.0
        Made ``n`` default to ``1``.
    """
    length = len(array)

    def _within_limit(_, index):
        # Keep elements whose distance from the end is at most n.
        return (length - index) <= n

    return take_right_while(array, _within_limit)
def take_right_while(array, predicate=None):
    """
    Take elements from the end of `array` until `predicate` returns falsey.
    The predicate is invoked with three arguments: ``(value, index, array)``.
    Args:
        array (list): List to process.
        predicate (mixed): Predicate called per iteration
    Returns:
        list: Dropped list.
    Example:
        >>> take_right_while([1, 2, 3, 4], lambda x: x >= 3)
        [3, 4]
    .. versionadded:: 1.1.0
    """
    # Count how many trailing elements satisfy the predicate.
    kept = 0
    for matches, _, _, _ in iteriteratee(array, predicate, reverse=True):
        if not matches:
            break
        kept += 1
    return array[len(array) - kept:]
def take_while(array, predicate=None):
    """
    Take elements from the beginning of `array` until `predicate` returns
    falsey. The predicate is invoked with three arguments: ``(value, index,
    array)``.
    Args:
        array (list): List to process.
        predicate (mixed): Predicate called per iteration
    Returns:
        list: Taken list.
    Example:
        >>> take_while([1, 2, 3, 4], lambda x: x < 3)
        [1, 2]
    .. versionadded:: 1.1.0
    """
    # Count how many leading elements satisfy the predicate.
    kept = 0
    for matches, _, _, _ in iteriteratee(array, predicate):
        if not matches:
            break
        kept += 1
    return array[:kept]
def union(array, *others):
    """
    Compute the union of the passed-in arrays, de-duplicating the combined
    result in first-seen order.
    Args:
        array (list): List to union with.
        others (list): Lists to unionize with `array`.
    Returns:
        list: Unionized list.
    Example:
        >>> union([1, 2, 3], [2, 3, 4], [3, 4, 5])
        [1, 2, 3, 4, 5]
    .. versionadded:: 1.0.0
    """
    if not others:
        # A single input is returned as a shallow copy without de-duplication,
        # mirroring the pass-through behavior of :func:`intersection`.
        return array[:]
    return uniq(flatten([array, *others]))
def union_by(array, *others, **kwargs):
    """
    Like :func:`union` except that uniqueness is computed on the value produced
    by `iteratee` for each element.
    Args:
        array (list): List to unionize with.
        others (list): Lists to unionize with `array`.
    Keyword Args:
        iteratee (callable): Function to invoke on each element.
    Returns:
        list: Unionized list.
    Example:
        >>> union_by([1, 2, 3], [2, 3, 4], iteratee=lambda x: x % 2)
        [1, 2]
        >>> union_by([1, 2, 3], [2, 3, 4], iteratee=lambda x: x % 9)
        [1, 2, 3, 4]
    .. versionadded:: 4.0.0
    """
    if not others:
        # A single input is returned as a shallow copy without de-duplication.
        return array[:]
    iteratee, rest = parse_iteratee("iteratee", *others, **kwargs)
    return uniq_by(flatten([array, *rest]), iteratee=iteratee)
def union_with(array, *others, **kwargs):
    """
    Like :func:`union` except that `comparator` decides element equality.
    Result values are chosen from the first array in which the value occurs.
    Args:
        array (list): List to unionize with.
        others (list): Lists to unionize with `array`.
    Keyword Args:
        comparator (callable, optional): Function to compare the elements of the arrays. Defaults to
            :func:`.is_equal`.
    Returns:
        list: Unionized list.
    Example:
        >>> comparator = lambda a, b: (a % 2) == (b % 2)
        >>> union_with([1, 2, 3], [2, 3, 4], comparator=comparator)
        [1, 2]
        >>> union_with([1, 2, 3], [2, 3, 4])
        [1, 2, 3, 4]
    .. versionadded:: 4.0.0
    """
    if not others:
        # A single input is returned as a shallow copy without de-duplication.
        return array[:]
    comparator, rest = parse_iteratee("comparator", *others, **kwargs)
    return uniq_with(flatten([array, *rest]), comparator=comparator)
def uniq(array):
    """
    Return a duplicate-free version of `array`, keeping the first occurrence of
    each value in its original order.
    Args:
        array (list): List to process.
    Returns:
        list: Unique list.
    Example:
        >>> uniq([1, 2, 3, 1, 2, 3])
        [1, 2, 3]
    .. versionadded:: 1.0.0
    .. versionchanged:: 4.0.0
        - Moved `iteratee` argument to :func:`uniq_by`.
        - Removed alias ``unique``.
    """
    # Delegate to the iteratee-aware variant with no iteratee.
    return uniq_by(array)
def uniq_by(array, iteratee=None):
    """
    Like :func:`uniq` except that uniqueness is computed on the value produced
    by `iteratee` for each element; result order follows first occurrence. The
    iteratee is invoked with one argument: ``(value)``.
    Args:
        array (list): List to process.
        iteratee (mixed, optional): Function to transform the elements of the arrays. Defaults to
            :func:`.identity`.
    Returns:
        list: Unique list.
    Example:
        >>> uniq_by([1, 2, 3, 1, 2, 3], lambda val: val % 2)
        [1, 2]
    .. versionadded:: 4.0.0
    """
    # Drain the de-duplicating generator into a list.
    return [item for item in iterunique(array, iteratee=iteratee)]
def uniq_with(array, comparator=None):
    """
    Like :func:`uniq` except that `comparator` decides element equality; result
    order follows first occurrence. The comparator is invoked with two
    arguments: ``(value, other)``.
    Args:
        array (list): List to process.
        comparator (callable, optional): Function to compare the elements of the arrays. Defaults to
            :func:`.is_equal`.
    Returns:
        list: Unique list.
    Example:
        >>> uniq_with([1, 2, 3, 4, 5], lambda a, b: (a % 2) == (b % 2))
        [1, 2]
    .. versionadded:: 4.0.0
    """
    # Drain the de-duplicating generator into a list.
    return [item for item in iterunique(array, comparator=comparator)]
def unshift(array, *items):
    """
    Insert `items` at the beginning of `array`, preserving their given order,
    and return the modified list.
    Args:
        array (list): List to modify.
        items (mixed): Items to insert.
    Returns:
        list: Modified list.
    Warning:
        `array` is modified in place.
    Example:
        >>> array = [1, 2, 3, 4]
        >>> unshift(array, -1, -2)
        [-1, -2, 1, 2, 3, 4]
        >>> array
        [-1, -2, 1, 2, 3, 4]
    .. versionadded:: 2.2.0
    """
    # Slice-assigning into the empty head prepends all items in order.
    array[:0] = items
    return array
def unzip(array):
    """
    The inverse of :func:`zip_`: regroup elements by their position within each
    input group.
    Args:
        array (list): List to process.
    Returns:
        list: Unzipped list.
    Example:
        >>> unzip([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    .. versionadded:: 1.0.0
    """
    # Unzipping is zipping with the groups unpacked as separate arguments.
    return [list(group) for group in zip(*array)]
def unzip_with(array, iteratee=None):
    """
    Like :func:`unzip` except that each regrouped list is reduced with
    `iteratee`, which is invoked with four arguments: ``(accumulator, value,
    index, group)``.
    Args:
        array (list): List to process.
        iteratee (callable, optional): Function to combine regrouped values.
    Returns:
        list: Unzipped list.
    Example:
        >>> from pydash import add
        >>> unzip_with([[1, 10, 100], [2, 20, 200]], add)
        [3, 30, 300]
    .. versionadded:: 3.3.0
    """
    if not array:
        return []
    regrouped = unzip(array)
    if iteratee is None:
        return regrouped
    # Fold each regrouped list down to a single value.
    return pyd.map_(regrouped, lambda group: pyd.reduce_(group, iteratee))
def without(array, *values):
    """
    Return a new list containing the elements of `array` with all occurrences
    of `values` removed.
    Args:
        array (list): List to filter.
        values (mixed): Values to remove.
    Returns:
        list: Filtered list.
    Example:
        >>> without([1, 2, 3, 2, 4, 4], 2, 4)
        [1, 3]
    .. versionadded:: 1.0.0
    """
    return list(filter(lambda item: item not in values, array))
def xor(array, *lists):
    """
    Create a list that is the symmetric difference of the provided lists.
    Args:
        array (list): List to process.
        *lists (list): Lists to xor with.
    Returns:
        list: XOR'd list.
    Example:
        >>> xor([1, 3, 4], [1, 2, 4], [2])
        [3]
    .. versionadded:: 1.0.0
    """
    # Delegate to the iteratee-aware variant with no iteratee.
    return xor_by(array, *lists)
def xor_by(array, *lists, **kwargs):
    """
    This method is like :func:`xor` except that it accepts iteratee which is invoked for each
    element of each arrays to generate the criterion by which by which they're compared. The order
    of result values is determined by the order they occur in the arrays. The iteratee is invoked
    with one argument: ``(value)``.
    Args:
        array (list): List to process.
        *lists (list): Lists to xor with.
    Keyword Args:
        iteratee (mixed, optional): Function to transform the elements of the arrays. Defaults to
            :func:`.identity`.
    Returns:
        list: XOR'd list.
    Example:
        >>> xor_by([2.1, 1.2], [2.3, 3.4], round)
        [1.2, 3.4]
        >>> xor_by([{'x': 1}], [{'x': 2}, {'x': 1}], 'x')
        [{'x': 2}]
    .. versionadded:: 4.0.0
    """
    if not lists:
        return array[:]
    iteratee, lists = parse_iteratee("iteratee", *lists, **kwargs)
    first, remaining = lists[0], lists[1:]
    # Symmetric difference of `array` and the first list under `iteratee`:
    # everything in the concatenation that is not in the intersection.
    result = uniq(
        difference_by(
            array + first,
            intersection_by(array, first, iteratee=iteratee),
            iteratee=iteratee,
        )
    )
    if not remaining:
        return result
    if iteratee is None:
        return xor(result, *remaining)
    # Propagate the iteratee when folding in the remaining lists; previously
    # the recursion went through plain :func:`xor` and silently dropped it.
    return xor_by(result, *remaining, iteratee=iteratee)
def xor_with(array, *lists, **kwargs):
    """
    This method is like :func:`xor` except that it accepts comparator which is invoked to compare
    elements of arrays. The order of result values is determined by the order they occur in the
    arrays. The comparator is invoked with two arguments: ``(arr_val, oth_val)``.
    Args:
        array (list): List to process.
        *lists (list): Lists to xor with.
    Keyword Args:
        comparator (callable, optional): Function to compare the elements of the arrays. Defaults to
            :func:`.is_equal`.
    Returns:
        list: XOR'd list.
    Example:
        >>> objects = [{'x': 1, 'y': 2}, {'x': 2, 'y': 1}]
        >>> others = [{'x': 1, 'y': 1}, {'x': 1, 'y': 2}]
        >>> expected = [{'y': 1, 'x': 2}, {'y': 1, 'x': 1}]
        >>> xor_with(objects, others, lambda a, b: a == b) == expected
        True
    .. versionadded:: 4.0.0
    """
    if not lists:
        return array[:]
    comp, lists = parse_iteratee("comparator", *lists, **kwargs)
    first, remaining = lists[0], lists[1:]
    # Symmetric difference of `array` and the first list under `comp`:
    # everything in the concatenation that is not in the intersection.
    result = uniq(
        difference_with(
            array + first,
            intersection_with(array, first, comparator=comp),
            comparator=comp,
        )
    )
    if not remaining:
        return result
    if comp is None:
        return xor_with(result, *remaining)
    # Propagate the comparator when folding in the remaining lists; previously
    # the recursive call omitted it, so the comparator was silently dropped.
    return xor_with(result, *remaining, comparator=comp)
def zip_(*arrays):
    """
    Group the elements of the given arrays by their corresponding indexes.
    Useful for separate data sources that are coordinated through matching
    array indexes.
    Args:
        arrays (list): Lists to process.
    Returns:
        list: Zipped list.
    Example:
        >>> zip_([1, 2, 3], [4, 5, 6], [7, 8, 9])
        [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
    .. versionadded:: 1.0.0
    """
    # zip yields tuples; convert each group to a list.
    return list(map(list, zip(*arrays)))
def zip_object(keys, values=None):
    """
    Create a dict from lists of keys and values. Pass either a single two
    dimensional list, i.e. ``[[key1, value1], [key2, value2]]``, or two lists,
    one of keys and one of corresponding values.
    Args:
        keys (list): Either a list of keys or a list of ``[key, value]`` pairs.
        values (list, optional): List of values to zip.
    Returns:
        dict: Zipped dict.
    Example:
        >>> zip_object([1, 2, 3], [4, 5, 6])
        {1: 4, 2: 5, 3: 6}
    .. versionadded:: 1.0.0
    .. versionchanged:: 4.0.0
        Removed alias ``object_``.
    """
    if values is None:
        # Single argument form: `keys` holds [key, value] pairs.
        keys, values = unzip(keys)
    return dict(zip(keys, values))
def zip_object_deep(keys, values=None):
    """
    Like :func:`zip_object` except that keys may be property paths, building a
    nested dict.
    Args:
        keys (list): Either a list of keys or a list of ``[key, value]`` pairs.
        values (list, optional): List of values to zip.
    Returns:
        dict: Zipped dict.
    Example:
        >>> expected = {'a': {'b': {'c': 1, 'd': 2}}}
        >>> zip_object_deep(['a.b.c', 'a.b.d'], [1, 2]) == expected
        True
    .. versionadded:: 4.0.0
    """
    if values is None:  # pragma: no cover
        keys, values = unzip(keys)
    result = {}
    # Each key may be a dotted path; pyd.set_ creates intermediate dicts.
    for position, key in enumerate(keys):
        result = pyd.set_(result, key, pyd.get(values, position))
    return result
def zip_with(*arrays, **kwargs):
    """
    This method is like :func:`zip` except that it accepts a iteratee to specify how grouped values
    should be combined. The iteratee is invoked with four arguments: ``(accumulator, value, index,
    group)``.

    Args:
        *arrays (list): Lists to process.

    Keyword Args:
        iteratee (callable): Function to combine grouped values.

    Returns:
        list: Zipped list of grouped elements.

    Example:

        >>> from pydash import add
        >>> zip_with([1, 2], [10, 20], [100, 200], add)
        [111, 222]
        >>> zip_with([1, 2], [10, 20], [100, 200], iteratee=add)
        [111, 222]

    .. versionadded:: 3.3.0
    """
    # An explicit keyword wins, even when set to ``None``; otherwise a trailing
    # positional argument is treated as the combiner when more than one was given.
    if "iteratee" in kwargs:
        combiner = kwargs["iteratee"]
    elif len(arrays) > 1:
        combiner = arrays[-1]
        arrays = arrays[:-1]
    else:
        combiner = None
    return unzip_with(arrays, combiner)
#
# Utility methods not a part of the main API
#
def iterflatten(array, depth=-1):
    """Iteratively flatten a list shallowly or deeply.

    Args:
        array (list): Sequence to flatten.
        depth (int): Number of nesting levels to flatten; ``-1`` (or any
            negative value) flattens completely, ``0`` yields items unchanged.

    Yields:
        mixed: Items with up to `depth` levels of list/tuple nesting removed.
    """
    for item in array:
        if isinstance(item, (list, tuple)) and depth != 0:
            # Each recursion level consumes one unit of depth; ``yield from``
            # replaces the manual re-yield loop.
            yield from iterflatten(item, depth - 1)
        else:
            yield item
def iterinterleave(*arrays):
    """Interleave multiple lists, yielding one item from each in turn until all are exhausted."""
    pending = [iter(array) for array in arrays]
    while pending:
        # Keep only the iterators that still produced a value this round.
        still_active = []
        for source in pending:
            try:
                value = next(source)
            except StopIteration:
                continue
            still_active.append(source)
            yield value
        pending = still_active
def iterintersperse(iterable, separator):
    """Iteratively intersperse `separator` between consecutive items of `iterable`.

    Fix: the original called ``next(iterable)`` unguarded; since PEP 479 a
    ``StopIteration`` escaping a generator body is turned into ``RuntimeError``,
    so an empty iterable crashed. An empty iterable now yields nothing.
    """
    iterator = iter(iterable)
    try:
        yield next(iterator)
    except StopIteration:
        return
    for item in iterator:
        yield separator
        yield item
def iterunique(array, comparator=None, iteratee=None):  # noqa: C901
    """Yield each item of `array` whose (optionally transformed) key was not seen before."""
    if not array:  # pragma: no cover
        return
    if iteratee is not None:
        iteratee = pyd.iteratee(iteratee)
    hashable_seen = set()
    unhashable_seen = []
    for item in array:
        cmp_item = item if iteratee is None else iteratee(item)
        if comparator is None:
            try:
                if cmp_item in hashable_seen:
                    continue
                hashable_seen.add(cmp_item)
                yield item
            except TypeError:
                # ``cmp_item`` is unhashable; fall back to a linear scan.
                if cmp_item not in unhashable_seen:
                    unhashable_seen.append(cmp_item)
                    yield item
        else:
            # Custom comparator: every key must be compared pairwise.
            for prior in unhashable_seen:
                if comparator(cmp_item, prior):
                    break
            else:
                unhashable_seen.append(cmp_item)
                yield item
def iterduplicates(array):
    """Yield ``(index, value)`` pairs for each duplicate occurrence found in `array`."""
    encountered = []
    for index, value in enumerate(array):
        if value in encountered:
            # Every repeat occurrence after the first is reported.
            yield index, value
        else:
            encountered.append(value)
def iterintersection(array, other, comparator=None, iteratee=None):
    """Yield values of `array` that have a match in `other`, using `comparator` to decide
    membership; ordering follows `array`."""
    if not array or not other:  # pragma: no cover
        return
    if comparator is None:
        comparator = pyd.is_equal
    iteratee = pyd.iteratee(iteratee)
    yielded_keys = []
    for candidate in array:
        cmp_candidate = iteratee(candidate)
        if cmp_candidate in yielded_keys:
            # Already emitted (or rejected) an equivalent value.
            continue
        yielded_keys.append(cmp_candidate)
        checked_keys = []
        for other_value in other:
            cmp_other = iteratee(other_value)
            if cmp_other in checked_keys:
                continue
            checked_keys.append(cmp_other)
            if comparator(cmp_candidate, cmp_other):
                yield candidate
                break
def iterdifference(array, other, comparator=None, iteratee=None):
    """Yield values in `array` that have no comparator match in `other`.

    Args:
        array (list): Values to filter.
        other (list): Values to exclude.
        comparator (callable, optional): Equality predicate; defaults to ``pyd.is_equal``.
        iteratee (mixed, optional): Transform applied to each value before comparing.

    Yields:
        mixed: Each value of `array` (repeats included) that differs from all of `other`.

    Note:
        The original implementation kept a ``not_seen`` list that was never
        populated, so its membership test was dead code; it has been removed.
    """
    if not array or not other:  # pragma: no cover
        return
    if comparator is None:
        comparator = pyd.is_equal
    iteratee = pyd.iteratee(iteratee)

    def is_different(item, seen):
        # Memoize confirmed-different items in ``seen`` so repeats skip re-scanning `other`.
        is_diff = True
        if item not in seen:
            for value in other:
                if comparator(iteratee(item), iteratee(value)):
                    is_diff = False
                    break
            if is_diff:
                seen.append(item)
        return is_diff

    seen = []
    for item in array:
        if is_different(item, seen):
            yield item
| mit | 3725b1ae098b2f0f6a15438569546cb7 | 23.399174 | 100 | 0.560902 | 3.773872 | false | false | false | false |
dgilland/pydash | src/pydash/helpers.py | 1 | 8525 | """Generic utility methods not part of main API."""
import builtins
from collections.abc import Hashable, Iterable, Mapping, Sequence
from decimal import Decimal
from functools import wraps
import inspect
from inspect import getfullargspec
import warnings
import pydash as pyd
#: Singleton object that differentiates between an explicit ``None`` value and an unset value.
UNSET = object()
#: Tuple of number types.
NUMBER_TYPES = (int, float, Decimal)
#: Dictionary of builtins with keys as the builtin function and values as the string name.
#: Builtin objects become the dict keys here, so unhashable entries must be filtered out.
BUILTINS = {value: key for key, value in builtins.__dict__.items() if isinstance(value, Hashable)}
def callit(iteratee, *args, **kwargs):
    """Call `iteratee` with only as many positional arguments as it supports.

    An ``argcount`` keyword, when supplied, bypasses the argspec inspection.
    """
    available = len(args)
    if "argcount" in kwargs:
        supported = kwargs["argcount"]
    else:
        supported = getargcount(iteratee, available)
    return iteratee(*args[: min(available, supported)])
def getargcount(iteratee, maxargs):
    """Return argument count of iteratee function."""
    if hasattr(iteratee, "_argcount"):
        # Optimization feature: the initiator already computed and stored the argcount.
        return iteratee._argcount
    if isinstance(iteratee, type) or pyd.is_builtin(iteratee):
        # Type iteratees and builtins only ever receive a single argument.
        return 1
    argcount = 1
    try:
        argcount = _getargcount(iteratee, maxargs)
    except TypeError:  # pragma: no cover
        pass
    return argcount
def _getargcount(iteratee, maxargs):
argcount = None
try:
# PY2: inspect.signature was added in Python 3.
# Try to use inspect.signature when possible since it works better for our purpose of
# getting the iteratee argcount since it takes into account the "self" argument in callable
# classes.
sig = inspect.signature(iteratee)
except (TypeError, ValueError, AttributeError):
pass
else: # pragma: no cover
if not any(
param.kind == inspect.Parameter.VAR_POSITIONAL for param in sig.parameters.values()
):
argcount = len(sig.parameters)
if argcount is None:
argspec = getfullargspec(iteratee)
if argspec and not argspec.varargs: # pragma: no cover
# Use inspected arg count.
argcount = len(argspec.args)
if argcount is None:
# Assume all args are handleable.
argcount = maxargs
return argcount
def iteriteratee(obj, iteratee=None, reverse=False):
    """Yield ``(result, item, key, obj)`` tuples from invoking the iteratee across `obj`."""
    if iteratee is None:
        cbk = pyd.identity
        argcount = 1
    else:
        cbk = pyd.iteratee(iteratee)
        argcount = getargcount(cbk, maxargs=3)
    entries = iterator(obj)
    if reverse:
        # Materialize first: a generic (key, value) iterator is not reversible.
        entries = reversed(tuple(entries))
    for key, value in entries:
        yield callit(cbk, value, key, obj, argcount=argcount), value, key, obj
def iterator(obj):
    """Return a ``(key, value)``-style iterator appropriate for `obj`'s type."""
    if isinstance(obj, Mapping):
        return obj.items()
    if hasattr(obj, "iteritems"):
        # Legacy mapping-like objects exposing a Python 2 style API.
        return obj.iteritems()  # noqa: B301
    if hasattr(obj, "items"):
        return iter(obj.items())
    if isinstance(obj, Iterable):
        return enumerate(obj)
    # Plain objects: iterate their instance attributes (empty for slotted/builtin types).
    return getattr(obj, "__dict__", {}).items()
def base_get(obj, key, default=UNSET):
    """
    Safely get an item by `key` from a sequence or mapping object when `default` provided.

    Args:
        obj (list|dict): Sequence or mapping to retrieve item from.
        key (mixed): Key or index identifying which item to retrieve.
        default (mixed, optional): Default value to return if `key` not found in `obj`.

    Returns:
        mixed: `obj[key]`, `obj.key`, or `default`.

    Raises:
        KeyError: If `obj` is missing key, index, or attribute and no default value provided.
    """
    if isinstance(obj, dict):
        result = _base_get_dict(obj, key, default=default)
    else:
        is_namedtuple = isinstance(obj, tuple) and hasattr(obj, "_fields")
        if is_namedtuple or not isinstance(obj, (Mapping, Sequence)):
            # getattr-style access only for plain objects and namedtuples; dict/list
            # key lookups must not expose class methods/attributes.
            result = _base_get_object(obj, key, default=default)
        else:
            result = _base_get_item(obj, key, default=default)
    if result is UNSET:
        # No default was supplied, so surface the miss to the caller.
        raise KeyError(f'Object "{repr(obj)}" does not have key "{key}"')
    return result
def _base_get_dict(obj, key, default=UNSET):
    """Dict lookup that retries non-integer keys as integers (e.g. ``"1"`` -> ``1``)."""
    hit = obj.get(key, UNSET)
    if hit is not UNSET:
        return hit
    if isinstance(key, int):
        return default
    try:
        return obj.get(int(key), default)
    except Exception:
        # Key is not convertible to an integer.
        return default
def _base_get_item(obj, key, default=UNSET):
    """Index into `obj` with `key`, retrying non-integer keys as integers; `default` on failure."""
    try:
        return obj[key]
    except Exception:
        pass
    if isinstance(key, int):
        return default
    try:
        return obj[int(key)]
    except Exception:
        return default
def _base_get_object(obj, key, default=UNSET):
    """Try item access first, then attribute access; return `default` if both miss."""
    item_result = _base_get_item(obj, key, default=UNSET)
    if item_result is not UNSET:
        return item_result
    try:
        return getattr(obj, key)
    except Exception:
        return default
def base_set(obj, key, value, allow_override=True):
    """
    Set an object's `key` to `value`. If `obj` is a ``list`` and the `key` is the next available
    index position, append to list; otherwise, pad the list with ``None`` and then append.

    Args:
        obj (list|dict): Object to assign value to.
        key (mixed): Key or index to assign to.
        value (mixed): Value to assign.
        allow_override (bool): Whether to allow overriding a previously set key.
    """
    if isinstance(obj, dict):
        if allow_override or key not in obj:
            obj[key] = value
    elif isinstance(obj, list):
        index = int(key)
        if index < len(obj):
            if allow_override:
                obj[index] = value
        else:
            if index > len(obj):
                # Grow the list with ``None`` padding so ``append`` lands at ``index``.
                obj[:] = (obj + [None] * index)[:index]
            obj.append(value)
    elif (allow_override or not hasattr(obj, key)) and obj is not None:
        setattr(obj, key, value)
    return obj
def cmp(a, b):  # pragma: no cover
    """
    Replacement for built-in function ``cmp`` that was removed in Python 3.

    ``None`` sorts before everything else; two ``None`` values compare equal.
    Note: Mainly used for comparison during sorting.
    """
    if a is None:
        return 0 if b is None else -1
    if b is None:
        return 1
    # Classic three-way comparison trick: True/False subtract to 1/0/-1.
    return (a > b) - (a < b)
def parse_iteratee(iteratee_keyword, *args, **kwargs):
    """Extract an iteratee passed either as keyword `iteratee_keyword` or as the final
    positional argument in `args`; return ``(iteratee, remaining_args)``."""
    iteratee = kwargs.get(iteratee_keyword)
    tail = args[-1]
    # Callables, strings, dicts, and ``None`` are all valid iteratee shorthands.
    tail_is_iteratee = callable(tail) or isinstance(tail, (str, dict)) or tail is None
    if iteratee is None and tail_is_iteratee:
        iteratee = tail
        args = args[:-1]
    return iteratee, args
class iterator_with_default(object):
    """A wrapper around an iterator object that provides a default.

    Subtlety: ``__next__`` evaluates ``self.next_default()`` *eagerly* as the
    ``next()`` fallback argument, so the default is consumed on the very first
    call regardless of whether the wrapped iterator is exhausted. The default
    is therefore only ever yielded when the collection is empty at the first
    ``__next__`` call; afterwards exhaustion raises ``StopIteration`` normally.
    """
    def __init__(self, collection, default):
        # Wrapped iterator and the one-shot default value.
        self.iter = iter(collection)
        self.default = default
    def __iter__(self):
        return self
    def next_default(self):
        # Hand out the default once, then degrade it to the UNSET sentinel.
        ret = self.default
        self.default = UNSET
        return ret
    def __next__(self):
        # NOTE: next_default() runs before next() consults the iterator (eager
        # argument evaluation) -- see the class docstring for the consequence.
        ret = next(self.iter, self.next_default())
        if ret is UNSET:
            raise StopIteration
        return ret
    next = __next__
def deprecated(func):  # pragma: no cover
    """
    Decorator which marks `func` as deprecated.

    Each call to the wrapped function emits a ``DeprecationWarning`` before
    delegating to the original implementation.
    """

    @wraps(func)
    def inner(*args, **kwargs):
        warnings.warn(
            f"Call to deprecated function {func.__name__}.",
            category=DeprecationWarning,
            stacklevel=3,
        )
        return func(*args, **kwargs)

    return inner
| mit | 5ede524c30c948d050425134d19e6b2e | 27.996599 | 100 | 0.61607 | 4.057592 | false | false | false | false |
dgilland/pydash | tasks.py | 1 | 3748 | """
This module provides the CLI interface for invoke tasks.
All tasks can be executed from this file's directory using:
$ inv <task>
Where <task> is a function defined below with the @task decorator.
"""
from functools import partial
import os
from invoke import Exit, UnexpectedExit, run as _run, task
PACKAGE_NAME = "pydash"
PACKAGE_SOURCE = f"src/{PACKAGE_NAME}"
TEST_TARGETS = f"{PACKAGE_SOURCE} tests"
LINT_TARGETS = f"{TEST_TARGETS} tasks.py"
EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit)
# Set pty=True to enable colored output when available.
run = partial(_run, pty=True)
@task
def black(ctx, quiet=False):
    """Autoformat code using black."""
    command = f"black {LINT_TARGETS}"
    run(command, hide=quiet)
@task
def isort(ctx, quiet=False):
    """Autoformat Python imports."""
    command = f"isort {LINT_TARGETS}"
    run(command, hide=quiet)
@task
def docformatter(ctx):
    """Autoformat docstrings using docformatter."""
    command = (
        f"docformatter -r {LINT_TARGETS} "
        "--in-place --pre-summary-newline --wrap-descriptions 100 --wrap-summaries 100"
    )
    run(command)
@task
def fmt(ctx):
    """Autoformat code and docstrings."""
    # Order matters: docformatter first, then import sorting, then black.
    steps = (
        ("Running docformatter", lambda: docformatter(ctx)),
        ("Running isort", lambda: isort(ctx, quiet=True)),
        ("Running black", lambda: black(ctx, quiet=True)),
    )
    for label, formatter in steps:
        print(label)
        formatter()
@task
def flake8(ctx):
    """Check code for PEP8 violations using flake8."""
    command = f"flake8 --format=pylint {LINT_TARGETS}"
    run(command)
@task
def pylint(ctx):
    """Check code for static errors using pylint."""
    command = f"pylint {LINT_TARGETS}"
    run(command)
@task
def lint(ctx):
    """Run linters."""
    linters = {"flake8": flake8, "pylint": pylint}
    failures = []
    print(f"Preparing to run linters: {', '.join(linters)}\n")
    for name, linter in linters.items():
        print(f"Running {name}")
        succeeded = True
        try:
            linter(ctx)
        except EXIT_EXCEPTIONS:
            # Record the failure but keep running the remaining linters.
            succeeded = False
            failures.append(name)
        result = "PASSED" if succeeded else "FAILED"
        print(f"{result}\n")
    if failures:
        failed = ", ".join(failures)
        raise Exit(f"ERROR: Linters that failed: {failed}")
@task(help={"args": "Override default pytest arguments"})
def test(ctx, args=f"{TEST_TARGETS} --cov={PACKAGE_NAME}"):
    """Run unit tests using pytest."""
    site_packages = os.getenv("TOX_ENV_SITE_PACKAGES_DIR")
    if site_packages:
        # Re-path package source to match tox env so that we generate proper coverage report.
        tox_pkg_src = os.path.join(site_packages, os.path.basename(PACKAGE_SOURCE))
        args = args.replace(PACKAGE_SOURCE, tox_pkg_src)
    run(f"pytest {args}")
@task
def ci(ctx):
    """Run linters and tests."""
    steps = (
        ("Building package", build),
        ("Building docs", docs),
        ("Checking linters", lint),
        ("Running unit tests", test),
    )
    for message, step in steps:
        print(message)
        step(ctx)
@task
def docs(ctx, serve=False, bind="127.0.0.1", port=8000):
    """Build docs."""
    run("rm -rf docs/_build")
    run("sphinx-build -q -W -b html docs docs/_build/html")
    if not serve:
        return
    print(f"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...")
    run(f"python -m http.server -b {bind} --directory docs/_build/html {port}", hide=True)
@task
def build(ctx):
    """Build Python package."""
    # Clean previous artifacts before producing the distribution.
    for command in ("rm -rf dist build docs/_build", "python -m build"):
        run(command)
@task
def clean(ctx):
    """Remove temporary files related to development."""
    commands = (
        "find . -type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete",
        "rm -rf .tox .coverage .cache .pytest_cache **/.egg* **/*.egg* dist build",
    )
    for command in commands:
        run(command)
@task(pre=[build])
def release(ctx):
    """Release Python package."""
    upload_command = "twine upload dist/*"
    run(upload_command)
| mit | c8a5222faa74ad36633665ff2cca88a9 | 23.337662 | 99 | 0.631537 | 3.385727 | false | true | false | false |
pebble/cloudpebble | ide/migrations/0022_auto__add_field_sourcefile_target.py | 3 | 11729 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the ``target`` CharField (default ``'app'``) to ``ide_sourcefile``."""
        # Adding field 'SourceFile.target'
        db.add_column(u'ide_sourcefile', 'target',
                      self.gf('django.db.models.fields.CharField')(default='app', max_length=10),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the ``target`` column from ``ide_sourcefile``."""
        # Deleting field 'SourceFile.target'
        db.delete_column(u'ide_sourcefile', 'target')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ide.buildresult': {
'Meta': {'object_name': 'BuildResult'},
'binary_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['ide.Project']"}),
'resource_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'total_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'41b21bff-a4ea-459a-89f4-746c1ae538f8'", 'max_length': '36'})
},
'ide.project': {
'Meta': {'object_name': 'Project'},
'app_capabilities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'app_company_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_is_watchface': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'app_jshint': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'app_keys': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'app_long_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_uuid': ('django.db.models.fields.CharField', [], {'default': "'27b0c9e9-ae42-4419-abae-0e316f4f38b9'", 'max_length': '36', 'null': 'True', 'blank': 'True'}),
'app_version_code': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'app_version_label': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_branch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'github_hook_build': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'github_hook_uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'github_last_commit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'optimisation': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'project_type': ('django.db.models.fields.CharField', [], {'default': "'native'", 'max_length': '10'})
},
'ide.resourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_menu_icon': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['ide.Project']"})
},
'ide.resourceidentifier': {
'Meta': {'unique_together': "(('resource_file', 'resource_id'),)", 'object_name': 'ResourceIdentifier'},
'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': "orm['ide.ResourceFile']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tracking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ide.sourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'SourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': "orm['ide.Project']"}),
'target': ('django.db.models.fields.CharField', [], {'default': "'app'", 'max_length': '10'})
},
'ide.templateproject': {
'Meta': {'object_name': 'TemplateProject', '_ormbases': ['ide.Project']},
u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'ide.usergithub': {
'Meta': {'object_name': 'UserGithub'},
'avatar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'github'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'ide.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'autocomplete': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'keybinds': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'tab_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'cloudpebble'", 'max_length': '50'}),
'use_spaces': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'whats_new': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'})
}
}
complete_apps = ['ide'] | mit | c60bf5c34798209e62dc7f13df77644f | 79.342466 | 195 | 0.550431 | 3.577005 | false | false | false | false |
pebble/cloudpebble | ide/migrations/0007_auto__add_field_project_optimisation.py | 3 | 9269 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the ``optimisation`` CharField (default ``'0'``) to ``ide_project``."""
        # Adding field 'Project.optimisation'
        db.add_column(u'ide_project', 'optimisation',
                      self.gf('django.db.models.fields.CharField')(default='0', max_length=1),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the ``optimisation`` column from ``ide_project``."""
        # Deleting field 'Project.optimisation'
        db.delete_column(u'ide_project', 'optimisation')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ide.buildresult': {
'Meta': {'object_name': 'BuildResult'},
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': u"orm['ide.Project']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'e3737ad854eb41dcaa581678c7f0d68d'", 'max_length': '32'})
},
u'ide.project': {
'Meta': {'unique_together': "(('owner', 'name'),)", 'object_name': 'Project'},
'github_hook_build': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'github_hook_uuid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'github_last_commit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'optimisation': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '1'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'version_def_name': ('django.db.models.fields.CharField', [], {'default': "'APP_RESOURCES'", 'max_length': '50'})
},
u'ide.resourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['ide.Project']"})
},
u'ide.resourceidentifier': {
'Meta': {'unique_together': "(('resource_file', 'resource_id'),)", 'object_name': 'ResourceIdentifier'},
'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': u"orm['ide.ResourceFile']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tracking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'ide.sourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'SourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': u"orm['ide.Project']"})
},
u'ide.templateproject': {
'Meta': {'object_name': 'TemplateProject', '_ormbases': [u'ide.Project']},
u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
u'ide.usergithub': {
'Meta': {'object_name': 'UserGithub'},
'avatar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'github'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'ide.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'autocomplete': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'keybinds': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'monokai'", 'max_length': '50'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['ide'] | mit | bafb9b3954723d86654c10fe856f5c15 | 73.16 | 187 | 0.54925 | 3.60101 | false | false | false | false |
pebble/cloudpebble | ide/api/npm.py | 2 | 1450 | import urllib
import requests
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_safe
from django.http import Http404
from utils.td_helper import send_td_event
from utils.jsonview import json_view
from utils.filter_dict import filter_dict
__author__ = 'katharine'
PACKAGE_SPEC = {
'version': True,
'name': True,
'description': True,
'keywords': True,
'author': True,
'_id': 'name'
}
@login_required
@require_safe
@json_view
def npm_search(request):
    """Search npm (via node-modules.com) for packages matching ``q``.

    Returns ``{'packages': [...]}`` with each result reduced to the
    fields in PACKAGE_SPEC.  A missing ``q`` parameter yields an empty
    result list instead of an error.
    """
    try:
        query = request.GET['q']
    except KeyError:
        return {'packages': []}
    search = requests.get('http://node-modules.com/search.json', {'q': query}).json()
    data = {'packages': [filter_dict(package, PACKAGE_SPEC) for package in search]}
    # Analytics event recording the search query.
    send_td_event('cloudpebble_package_search', data={
        'data': {
            'query': query
        }
    }, request=request)
    return data
@login_required
@require_safe
@json_view
def npm_info(request):
    """Fetch metadata for the single npm package named by ``q``.

    Raises ``Http404`` when the package does not exist.
    NOTE(review): unlike npm_search, a missing ``q`` raises KeyError
    here (-> 500); confirm whether that should be handled gracefully.
    """
    query = request.GET['q']
    try:
        package = requests.get('http://node-modules.com/package/%s.json' % urllib.quote(query)).json()
    except ValueError:
        # .json() raises ValueError for a non-JSON body, which is how the
        # upstream service signals an unknown package.
        raise Http404("Package not found")
    data = {
        'package': filter_dict(package, PACKAGE_SPEC)
    }
    send_td_event('cloudpebble_package_get_info', data={
        'data': {
            'query': query
        }
    }, request=request)
    return data
bioconda/bioconda-utils | bioconda_utils/graph.py | 1 | 4478 | """
Construction and Manipulation of Package/Recipe Graphs
"""
import logging
from collections import defaultdict
from fnmatch import fnmatch
from itertools import chain
import networkx as nx
from . import utils
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def build(recipes, config, blacklist=None, restrict=True):
    """
    Returns the DAG of recipe paths and a dictionary that maps package names to
    lists of recipe paths to all defined versions of the package.

    Parameters
    ----------
    recipes : iterable
        An iterable of recipe paths, typically obtained via `get_recipes()`

    config :
        Unused by this function -- presumably kept for call-site
        compatibility; confirm before removing.

    blacklist : set
        Package names to skip

    restrict : bool
        If True, then dependencies will be included in the DAG only if they are
        themselves in `recipes`. Otherwise, include all dependencies of
        `recipes`.

    Returns
    -------
    dag : nx.DiGraph
        Directed graph of packages -- nodes are package names; edges are
        dependencies (both run and build dependencies)

    name2recipe : dict
        Dictionary mapping package names to recipe paths. These recipe path
        values are sets and contain paths to all defined versions.
    """
    logger.info("Generating DAG")
    recipes = list(recipes)
    metadata = list(utils.parallel_iter(utils.load_meta_fast, recipes, "Loading Recipes"))
    if blacklist is None:
        blacklist = set()

    # name2recipe is meta.yaml's package:name mapped to the recipe path.
    #
    # A name should map to exactly one recipe. It is possible for multiple
    # names to map to the same recipe, if the package name somehow depends on
    # the environment.
    #
    # Note that this may change once we support conda-build 3.
    name2recipe = defaultdict(set)
    for meta, recipe in metadata:
        name = meta["package"]["name"]
        if name not in blacklist:
            name2recipe[name].update([recipe])

    def get_deps(meta, sec):
        # Extract bare package names (version specs stripped) from one
        # requirements section ("build", "host" or "run").
        reqs = meta.get("requirements")
        if not reqs:
            return []
        deps = reqs.get(sec)
        if not deps:
            return []
        return [dep.split()[0] for dep in deps if dep]

    def get_inner_deps(dependencies):
        # Filter deps to those we are building ourselves (unless
        # ``restrict`` is False, in which case keep everything).
        dependencies = list(dependencies)
        for dep in dependencies:
            if dep in name2recipe or not restrict:
                yield dep

    dag = nx.DiGraph()
    dag.add_nodes_from(meta["package"]["name"]
                       for meta, recipe in metadata)
    # Edge direction: dependency -> dependent package.
    for meta, recipe in metadata:
        name = meta["package"]["name"]
        dag.add_edges_from(
            (dep, name)
            for dep in set(chain(
                get_inner_deps(get_deps(meta, "build")),
                get_inner_deps(get_deps(meta, "host")),
                get_inner_deps(get_deps(meta, "run")),
            ))
        )

    return dag, name2recipe
def build_from_recipes(recipes):
    """Build a DiGraph over recipe objects.

    Nodes are the recipes themselves; an edge runs from each recipe that
    provides a package to every recipe depending on that package.
    """
    logger.info("Building Recipe DAG")

    # Map package name -> set of recipes providing it.
    providers = defaultdict(set)
    all_recipes = []
    for rec in recipes:
        all_recipes.append(rec)
        for pkg_name in rec.package_names:
            providers[pkg_name].add(rec)

    dag = nx.DiGraph()
    dag.add_nodes_from(all_recipes)
    edges = [
        (provider, rec)
        for rec in all_recipes
        for dep in rec.get_deps()
        for provider in providers.get(dep, ())
    ]
    dag.add_edges_from(edges)

    logger.info("Building Recipe DAG: done (%i nodes, %i edges)", len(dag), len(dag.edges()))
    return dag
def filter_recipe_dag(dag, include, exclude):
    """Reduces **dag** to the recipes whose ``reldir`` matches a pattern
    in **include** (and none in **exclude**), plus their requirements."""
    def wanted(recipe):
        hits_include = any(fnmatch(recipe.reldir, pat) for pat in include)
        hits_exclude = any(fnmatch(recipe.reldir, pat) for pat in exclude)
        return hits_include and not hits_exclude

    keep = set()
    for recipe in dag:
        if recipe in keep or not wanted(recipe):
            continue
        keep.add(recipe)
        # Pull in everything the selected recipe depends on.
        keep |= nx.ancestors(dag, recipe)
    return nx.subgraph(dag, keep)
def filter(dag, packages):
    """Reduce **dag** to **packages** plus all of their ancestors
    (everything they depend on, directly or transitively)."""
    # NOTE: this function shadows the ``filter`` builtin; renaming it
    # would break callers, so it is kept as-is.
    nodes = set()
    for package in packages:
        if package in nodes:
            continue  # already got all ancestors
        nodes.add(package)
        try:
            nodes |= nx.ancestors(dag, package)
        except nx.exception.NetworkXError:
            # Only swallow the error when the package genuinely isn't a
            # node of the graph; anything else is re-raised.
            if package not in nx.nodes(dag):
                logger.error("Can't find %s in dag", package)
            else:
                raise
    return nx.subgraph(dag, nodes)
| mit | 177c060f2d39b48be7d9a59a2e26538b | 29.256757 | 93 | 0.615453 | 4.236518 | false | false | false | false |
pebble/cloudpebble | ide/utils/mailinglist.py | 2 | 1338 | import logging
import mailchimp
from django.conf import settings
logger = logging.getLogger(__name__)
mailchimp_default_list_id = settings.MAILCHIMP_LIST_ID
def add_user(user, mailing_list_id=None):
    """Subscribe ``user`` (by email) to a MailChimp mailing list.

    ``mailing_list_id`` overrides the configured default list.  Every
    failure mode is logged and swallowed so the caller's flow is never
    blocked by MailChimp being misconfigured or unavailable.
    """
    try:
        mailchimp_api = mailchimp.Mailchimp(apikey=settings.MAILCHIMP_API_KEY)
    except mailchimp.Error:
        logger.error("Missing or invalid MAILCHIMP_API_KEY")
        return

    list_id = mailing_list_id or mailchimp_default_list_id
    if list_id is None:
        logger.error("Missing MAILCHIMP_LIST_ID")
        return
    try:
        # double_optin=False subscribes immediately (no confirmation
        # email); existing subscriptions are left untouched.
        response = mailchimp_api.lists.subscribe(list_id,
                                                 {'email': user.email},
                                                 double_optin=False,
                                                 update_existing=False,
                                                 replace_interests=False)
        logger.debug("{} was successfully subscribed to list {}".format(response['email'], list_id))
    except mailchimp.ListDoesNotExistError:
        logger.error("List {} does not exist".format(list_id))
    except mailchimp.ListAlreadySubscribedError:
        logger.info("User already subscribed to list {}".format(list_id))
    except mailchimp.Error as e:
        logger.error("An error occurred: {} - {}".format(e.__class__, e))
| mit | e5c41fb26fe74669f6dcc4e02a63dcab | 37.228571 | 100 | 0.591181 | 4.12963 | false | false | false | false |
benoitc/couchdbkit | couchdbkit/utils.py | 4 | 6295 | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
"""
Mostly utility functions couchdbkit uses internally that don't
really belong anywhere else in the modules.
"""
from __future__ import with_statement
import codecs
import string
from hashlib import md5
import os
import re
import sys
import urllib
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise ImportError("""simplejson isn't installed
Install it with the command:
pip install simplejson
""")
# backport relpath from python2.6
# Backport of os.path.relpath for Python < 2.6; modern interpreters take
# the final ``else`` branch and simply alias the stdlib implementation.
if not hasattr(os.path, 'relpath'):
    if os.name == "nt":
        def splitunc(p):
            # Split a Windows path into (UNC mount point, rest).
            if p[1:2] == ':':
                return '', p # Drive letter present
            firstTwo = p[0:2]
            if firstTwo == '//' or firstTwo == '\\\\':
                # is a UNC path:
                # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
                # \\machine\mountpoint\directories...
                # directory ^^^^^^^^^^^^^^^
                normp = os.path.normcase(p)
                index = normp.find('\\', 2)
                if index == -1:
                    ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
                    return ("", p)
                index = normp.find('\\', index + 1)
                if index == -1:
                    index = len(p)
                return p[:index], p[index:]
            return '', p

        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""

            if not path:
                raise ValueError("no path specified")
            start_list = os.path.abspath(start).split(os.path.sep)
            path_list = os.path.abspath(path).split(os.path.sep)
            if start_list[0].lower() != path_list[0].lower():
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                        % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                                        % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1

            rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return os.path.curdir
            return os.path.join(*rel_list)
    else:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""

            if not path:
                raise ValueError("no path specified")

            start_list = os.path.abspath(start).split(os.path.sep)
            path_list = os.path.abspath(path).split(os.path.sep)

            # Work out how much of the filepath is shared by start and path.
            i = len(os.path.commonprefix([start_list, path_list]))

            rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return os.path.curdir
            return os.path.join(*rel_list)
else:
    relpath = os.path.relpath
def split_path(path):
    """Split *path* into its individual components.

    Example: ``"a/b/c" -> ["a", "b", "c"]``.

    Fix: ``os.path.split`` is a fixed point at the filesystem root
    (``os.path.split('/') == ('/', '')``), so the original loop never
    terminated for absolute paths.  We now stop when the head no longer
    shrinks, keeping the root component as the first element.
    Behavior for relative paths (and ``""`` -> ``[""]``) is unchanged.
    """
    parts = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # Reached the root (or empty string): record it and stop
            # instead of looping forever.
            parts = [head] + parts
            break
        parts = [tail] + parts
        path = head
        if not path: break
    return parts
# CouchDB database names must start with a lowercase letter and contain
# only lowercase letters, digits and the characters _$()+-/ .
VALID_DB_NAME = re.compile(r'^[a-z][a-z0-9_$()+-/]*$')
# Reserved system databases that bypass the name check.
SPECIAL_DBS = ("_users", "_replicator",)
def validate_dbname(name):
    """ validate dbname

    Returns ``True`` for a valid name, raises ``ValueError`` otherwise.
    NOTE: ``urllib.unquote`` is Python 2 only.
    """
    if name in SPECIAL_DBS:
        return True
    elif not VALID_DB_NAME.match(urllib.unquote(name)):
        raise ValueError("Invalid db name: '%s'" % name)
    return True
def to_bytestring(s):
    """ convert to bytestring an unicode

    Unicode strings are UTF-8 encoded; byte strings and non-string
    values pass through unchanged.  NOTE: relies on the Python 2
    ``basestring``/``unicode`` builtins.
    """
    if not isinstance(s, basestring):
        return s
    if isinstance(s, unicode):
        return s.encode('utf-8')
    else:
        return s
def read_file(fname, utf8=True, force_read=False):
    """Return the content of *fname*.

    With ``utf8`` (the default) the file is decoded as UTF-8 and a
    unicode string is returned; otherwise the raw bytes are returned.
    When decoding fails, ``force_read`` retries in raw-bytes mode
    instead of propagating ``UnicodeError``.
    """
    if not utf8:
        with open(fname, 'rb') as fobj:
            return fobj.read()
    try:
        with codecs.open(fname, 'rb', "utf-8") as fobj:
            return fobj.read()
    except UnicodeError:
        if not force_read:
            raise
        return read_file(fname, utf8=False)
def sign_file(file_path):
    """ return md5 hash from file content

    :attr file_path: string, path of file

    :return: string, md5 hexdigest; the empty string when ``file_path``
        is not a regular file
    """
    if os.path.isfile(file_path):
        # force_read falls back to raw bytes if the file isn't UTF-8.
        content = read_file(file_path, force_read=True)
        return md5(to_bytestring(content)).hexdigest()
    return ''
def write_content(fname, content):
    """ write content in a file

    :attr fname: string,filename
    :attr content: string

    Fix: the file is now opened with a ``with`` block so the handle is
    always closed, even when the write raises (the original leaked the
    descriptor on error).
    """
    with open(fname, 'wb') as f:
        f.write(to_bytestring(content))
def write_json(filename, content):
    """ serialize content in json and save it

    :attr filename: string
    :attr content: any JSON-serializable object
    """
    write_content(filename, json.dumps(content))
def read_json(filename, use_environment=False):
    """ read a json file and deserialize

    :attr filename: string
    :attr use_environment: boolean, default is False. If
    True, replace environment variable by their value in file
    content

    :return: dict or list; an empty dict when the file does not exist
    """
    try:
        data = read_file(filename, force_read=True)
    except IOError, e:
        # errno 2 == ENOENT: treat a missing file as an empty document.
        # NOTE: ``e[0]`` indexing is a Python 2 idiom.
        if e[0] == 2:
            return {}
        raise

    if use_environment:
        data = string.Template(data).substitute(os.environ)

    try:
        data = json.loads(data)
    except ValueError:
        # Invalid JSON: name the offending file, then propagate.
        print >>sys.stderr, "Json is invalid, can't load %s" % filename
        raise

    return data
| mit | 1987d88760ba66d26a013d976dba617c | 28.553991 | 88 | 0.535663 | 4.037845 | false | false | false | false |
benoitc/couchdbkit | couchdbkit/external.py | 6 | 1470 | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import sys
from .utils import json
class External(object):
    """ simple class to handle an external
    and send the response.

    CouchDB writes one JSON request per line on stdin; each decoded
    request is passed to :meth:`handle_line`, which replies via
    :meth:`send_response`.

    example:

        from couchdbkit.external import External
        from couchdbkit.utils import json

        class Test(External):
            def handle_line(self, line):
                self.send_response(200,
                    "got message external object %s" % json.dumps(line),
                    {"Content-type": "text/plain"})

        if __name__ == "__main__":
            Test().run()

    """

    def __init__(self, stdin=sys.stdin, stdout=sys.stdout):
        self.stdin = stdin
        self.stdout = stdout

    def handle_line(self, line):
        """Process one decoded request; subclasses must override."""
        raise NotImplementedError

    def write(self, line):
        """Write *line* plus a newline and flush immediately so CouchDB
        sees the response without buffering delays."""
        self.stdout.write("%s\n" % line)
        self.stdout.flush()

    def lines(self):
        """Yield each stdin line decoded from JSON, until EOF."""
        line = self.stdin.readline()
        while line:
            yield json.loads(line)
            line = self.stdin.readline()

    def run(self):
        """Main loop: dispatch every incoming request to handle_line."""
        for line in self.lines():
            self.handle_line(line)

    def send_response(self, code=200, body="", headers=None):
        """Serialize and write a response object.

        Fix: ``headers`` previously defaulted to a shared mutable ``{}``
        (classic mutable-default pitfall); it now defaults to ``None``
        and a fresh dict is created per call.
        """
        resp = {
            'code': code,
            'body': body,
            'headers': headers if headers is not None else {},
        }
        self.write(json.dumps(resp))
| mit | 8680b2c414c7b94636d14d6763568590 | 24.344828 | 72 | 0.527891 | 4.323529 | false | false | false | false |
benoitc/couchdbkit | couchdbkit/wsgi/proxy.py | 6 | 1367 | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import urlparse
from restkit.contrib.wsgi_proxy import HostProxy, ALLOWED_METHODS
from webob import Request
class CouchdbProxy(object):
    """\
    WSGI application to proxy a couchdb server.

    Simple usage to proxy a CouchDB server on default url::

        from couchdbkit.wsgi import CouchdbProxy
        application = CouchdbProxy()
    """

    def __init__(self, uri="http://127.0.0.1:5984",
            allowed_method=ALLOWED_METHODS, **kwargs):
        self.proxy = HostProxy(uri, allowed_methods=allowed_method,
                **kwargs)

    def do_proxy(self, req, environ, start_response):
        """\
        return proxy response. Can be overrided to add authentification and
        such. It's better to override do_proxy method than the __call__
        """
        return req.get_response(self.proxy)

    def __call__(self, environ, start_response):
        req = Request(environ)
        if 'RAW_URI' in req.environ:
            # gunicorn so we can use real path non encoded
            u = urlparse.urlparse(req.environ['RAW_URI'])
            req.environ['PATH_INFO'] = u.path

        # BUG FIX: this previously called the non-existent ``do_proy``,
        # so every proxied request raised AttributeError.
        resp = self.do_proxy(req, environ, start_response)
        return resp(environ, start_response)
| mit | 46d7d28de483b17d1057feb3c8ff51d0 | 32.341463 | 76 | 0.627652 | 3.962319 | false | false | false | false |
benoitc/couchdbkit | couchdbkit/loaders.py | 6 | 3408 | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
"""
Loaders are a simple way to manage design docs in your Python application.
Loaders are compatible with couchapp script (http://github.com/couchapp/couchapp).
So it means that you can simply use couchdbkit as replacement for your python
applications with advantages of couchdbkit client. Compatibility with couchapp means that
you can also use macros to include javascript code or design doc members in your views,
shows & lists.
Loaders are FileSystemDocsLoader and FileSystemDocLoader. The first
one takes a directory and retrieve all design docs before sending them to
CouchDB. Second allow you to send only one design doc.
This module is here for compatibility reason and will be removed in 0.6.
It's replaced by couchdbkit.designer module and push* functions.
"""
from __future__ import with_statement

import os

from .designer import document, push, pushapps, pushdocs
class BaseDocsLoader(object):
    """Baseclass for all doc loaders. """

    def get_docs(self):
        # Return the list of (design) documents managed by this loader.
        raise NotImplementedError

    def sync(self, dbs, atomic=True, **kwargs):
        # Push the managed documents to every database in ``dbs``.
        raise NotImplementedError
class FileSystemDocsLoader(BaseDocsLoader):
    """ Load docs from the filesystem. This loader can find docs
    in folders on the filesystem and is the preferred way to load them.

    The loader takes the path for design docs as a string  or if multiple
    locations are wanted a list of them which is then looked up in the
    given order:

    >>> loader = FileSystemDocsLoader('/path/to/templates')
    >>> loader = FileSystemDocsLoader(['/path/to/templates', '/other/path'])

    You could also do the same to loads docs.
    """

    def __init__(self, designpath, docpath=None):
        # Normalize both arguments to lists of paths.
        # NOTE: ``basestring`` makes this Python 2 only.
        if isinstance(designpath, basestring):
            self.designpaths = [designpath]
        else:
            self.designpaths = designpath

        docpath = docpath or []
        if isinstance(docpath, basestring):
            docpath = [docpath]
        self.docpaths = docpath

    def get_docs(self):
        # export=True makes push*/pushapps return the docs instead of
        # sending them to a database.
        docs = []
        for path in self.docpaths:
            ret = pushdocs(path, [], export=True)
            docs.extend(ret['docs'])

        for path in self.designpaths:
            ret = pushapps(path, [], export=True)
            docs.extend(ret['docs'])
        return docs

    def sync(self, dbs, atomic=True, **kwargs):
        for path in self.docpaths:
            pushdocs(path, dbs, atomic=atomic)

        for path in self.designpaths:
            pushapps(path, dbs, atomic=atomic)
class FileSystemDocLoader(BaseDocsLoader):
    """ Load only one design doc from a path on the filesystem.

    >>> loader = FileSystemDocLoader("/path/to/designdocfolder", "nameodesigndoc")

    Fixes:
      * ``design_name`` defaulted to ``None`` and then crashed on
        ``None.startswith(...)``; it now falls back to ``name``.
      * ``get_docs``/``sync`` read ``self.design_path`` which ``__init__``
        never set (it only set ``self.designpath``), raising
        AttributeError; the full folder path is now computed up front.
    """

    def __init__(self, designpath, name, design_name=None):
        self.designpath = designpath
        self.name = name
        # Full path to the design doc folder: <designpath>/<name>.
        self.design_path = os.path.join(designpath, name)
        if design_name is None:
            # Default the design doc id to the folder name.
            design_name = name
        if not design_name.startswith("_design"):
            design_name = "_design/%s" % design_name
        self.design_name = design_name

    def get_docs(self):
        return document(self.design_path, create=False,
                        docid=self.design_name)

    def sync(self, dbs, atomic=True, **kwargs):
        push(self.design_path, dbs, atomic=atomic,
                docid=self.design_name)
| mit | cb911e830e05ae017c6d525fb3a36477 | 33.424242 | 89 | 0.660211 | 4.130909 | false | false | false | false |
benoitc/couchdbkit | couchdbkit/wsgi/handler.py | 6 | 4382 | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import sys
import StringIO
import traceback
from urllib import unquote
from restkit.util import url_encode
from .. import __version__
from ..external import External
def _normalize_name(name):
return "-".join([w.lower().capitalize() for w in name.split("-")])
class WSGIRequest(object):
    """Adapt one CouchDB external-protocol request (a decoded JSON dict)
    into a WSGI ``environ`` and collect the app's response status and
    headers.  NOTE: Python 2 code (``StringIO`` module, implicit
    unicode handling)."""

    SERVER_VERSION = "couchdbkit/%s" % __version__

    def __init__(self, line):
        # ``line`` is the decoded JSON request from CouchDB.
        self.line = line
        self.response_status = 200
        self.response_headers = {}
        self.start_response_called = False

    def read(self):
        """Build and return the WSGI environ for this request."""
        headers = self.parse_headers()
        # NOTE(review): parse_headers keys keep the dash (e.g.
        # "CONTENT-LENGTH"), so this underscore lookup appears to always
        # return None -- confirm; the body branch below usually
        # overwrites it anyway.
        length = headers.get("CONTENT_LENGTH")
        if self.line["body"] and self.line["body"] != "undefined":
            length = len(self.line["body"])
            body = StringIO.StringIO(self.line["body"])
        else:
            body = StringIO.StringIO()

        # path: first two segments become SCRIPT_NAME, rest PATH_INFO
        script_name, path_info = self.line['path'][:2], self.line['path'][2:]
        if path_info:
            path_info = "/%s" % "/".join(path_info)
        else:
            path_info = ""
        script_name = "/%s" % "/".join(script_name)

        # build query string (None-valued params are dropped)
        args = []
        query_string = None
        for k, v in self.line["query"].items():
            if v is None:
                continue
            else:
                args.append((k,v))
        if args: query_string = url_encode(dict(args))

        # raw path could be useful
        # NOTE(review): no "?" separator is inserted and query_string may
        # be None (yielding e.g. "/fooNone") -- confirm intended format.
        path = "%s%s" % (path_info, query_string)

        # get server address from the Host header (default port 80)
        if ":" in self.line["headers"]["Host"]:
            server_address = self.line["headers"]["Host"].split(":")
        else:
            server_address = (self.line["headers"]["Host"], 80)

        environ = {
            "wsgi.url_scheme": 'http',
            "wsgi.input": body,
            "wsgi.errors": StringIO.StringIO(),
            "wsgi.version": (1, 0),
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
            "SCRIPT_NAME": script_name,
            "SERVER_SOFTWARE": self.SERVER_VERSION,
            "COUCHDB_INFO": self.line["info"],
            "COUCHDB_REQUEST": self.line,
            "REQUEST_METHOD": self.line["verb"].upper(),
            "PATH_INFO": unquote(path_info),
            "QUERY_STRING": query_string,
            "RAW_URI": path,
            "CONTENT_TYPE": headers.get('CONTENT-TYPE', ''),
            "CONTENT_LENGTH": length,
            "REMOTE_ADDR": self.line['peer'],
            "REMOTE_PORT": 0,
            "SERVER_NAME": server_address[0],
            "SERVER_PORT": int(server_address[1]),
            "SERVER_PROTOCOL": "HTTP/1.1"
        }

        # Remaining headers become HTTP_* keys, per the WSGI spec.
        for key, value in headers.items():
            key = 'HTTP_' + key.replace('-', '_')
            if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                environ[key] = value

        return environ

    def start_response(self, status, response_headers):
        """WSGI start_response callable; records status and headers for
        later use by the handler."""
        self.response_status = int(status.split(" ")[0])
        for name, value in response_headers:
            name = _normalize_name(name)
            self.response_headers[name] = value.strip()
        self.start_response_called = True

    def parse_headers(self):
        """Return the request headers with upper-cased, utf-8 encoded
        names and stripped values (dashes are preserved)."""
        headers = {}
        for name, value in self.line.get("headers", {}).items():
            name = name.strip().upper().encode("utf-8")
            headers[name] = value.strip().encode("utf-8")
        return headers
class WSGIHandler(External):
    """Serve a WSGI application over CouchDB's external process
    protocol: one JSON request per stdin line, one JSON response out."""

    def __init__(self, application, stdin=sys.stdin,
            stdout=sys.stdout):
        External.__init__(self, stdin=stdin, stdout=stdout)
        self.app = application

    def handle_line(self, line):
        """Run one CouchDB request through the WSGI app and reply."""
        try:
            req = WSGIRequest(line)
            response = self.app(req.read(), req.start_response)
        except:
            # Bare except is deliberate: any failure is reported back to
            # CouchDB as a 500 with the traceback, and the process keeps
            # serving subsequent lines.
            self.send_response(500, "".join(traceback.format_exc()),
                    {"Content-Type": "text/plain"})
            return

        content = "".join(response).encode("utf-8")
        self.send_response(req.response_status, content, req.response_headers)
| mit | 79e01aa9134fa31e7512d76d0401a6ed | 32.19697 | 78 | 0.521223 | 4.072491 | false | false | false | false |
cole/aiosmtplib | tests/test_errors.py | 1 | 4413 | """
Test error class imports, arguments, and inheritance.
"""
import asyncio
from typing import List, Tuple, Type, Union
import pytest
from hypothesis import given
from hypothesis.strategies import integers, lists, text, tuples
from aiosmtplib import (
SMTPAuthenticationError,
SMTPConnectError,
SMTPConnectTimeoutError,
SMTPDataError,
SMTPException,
SMTPHeloError,
SMTPNotSupported,
SMTPReadTimeoutError,
SMTPRecipientRefused,
SMTPRecipientsRefused,
SMTPResponseException,
SMTPSenderRefused,
SMTPServerDisconnected,
SMTPTimeoutError,
)
@given(error_message=text())
def test_raise_smtp_exception(error_message: str) -> None:
    """SMTPException stores the message passed to it."""
    with pytest.raises(SMTPException) as excinfo:
        raise SMTPException(error_message)

    assert excinfo.value.message == error_message
@given(code=integers(), error_message=text())
def test_raise_smtp_response_exception(code: int, error_message: str) -> None:
    """SMTPResponseException subclasses SMTPException and stores code/message."""
    with pytest.raises(SMTPResponseException) as excinfo:
        raise SMTPResponseException(code, error_message)

    assert issubclass(excinfo.type, SMTPException)
    assert excinfo.value.code == code
    assert excinfo.value.message == error_message
@pytest.mark.parametrize(
    "error_class", (SMTPServerDisconnected, SMTPConnectError, SMTPConnectTimeoutError)
)
@given(error_message=text())
def test_connection_exceptions(
    error_message: str, error_class: Type[SMTPException]
) -> None:
    """Connection errors derive from both SMTPException and ConnectionError."""
    with pytest.raises(error_class) as excinfo:
        raise error_class(error_message)

    assert issubclass(excinfo.type, SMTPException)
    assert issubclass(excinfo.type, ConnectionError)
    assert excinfo.value.message == error_message
@pytest.mark.parametrize(
    "error_class", (SMTPTimeoutError, SMTPConnectTimeoutError, SMTPReadTimeoutError)
)
@given(error_message=text())
def test_timeout_exceptions(
    error_message: str, error_class: Type[SMTPException]
) -> None:
    """Timeout errors derive from both SMTPException and asyncio.TimeoutError."""
    with pytest.raises(error_class) as excinfo:
        raise error_class(error_message)

    assert issubclass(excinfo.type, SMTPException)
    assert issubclass(excinfo.type, asyncio.TimeoutError)
    assert excinfo.value.message == error_message
@pytest.mark.parametrize(
    "error_class", (SMTPHeloError, SMTPDataError, SMTPAuthenticationError)
)
@given(code=integers(), error_message=text())
def test_simple_response_exceptions(
    code: int,
    error_message: str,
    error_class: Type[Union[SMTPHeloError, SMTPDataError, SMTPAuthenticationError]],
) -> None:
    """Response-based errors subclass SMTPResponseException and keep code/message."""
    with pytest.raises(error_class) as excinfo:
        raise error_class(code, error_message)

    assert issubclass(excinfo.type, SMTPResponseException)
    assert excinfo.value.code == code
    assert excinfo.value.message == error_message
@given(code=integers(), error_message=text(), sender=text())
def test_raise_smtp_sender_refused(code: int, error_message: str, sender: str) -> None:
    """SMTPSenderRefused additionally records the refused sender address."""
    with pytest.raises(SMTPSenderRefused) as excinfo:
        raise SMTPSenderRefused(code, error_message, sender)

    assert issubclass(excinfo.type, SMTPResponseException)
    assert excinfo.value.code == code
    assert excinfo.value.message == error_message
    assert excinfo.value.sender == sender
@given(code=integers(), error_message=text(), recipient=text())
def test_raise_smtp_recipient_refused(
    code: int, error_message: str, recipient: str
) -> None:
    """SMTPRecipientRefused additionally records the refused recipient."""
    with pytest.raises(SMTPRecipientRefused) as excinfo:
        raise SMTPRecipientRefused(code, error_message, recipient)

    assert issubclass(excinfo.type, SMTPResponseException)
    assert excinfo.value.code == code
    assert excinfo.value.message == error_message
    assert excinfo.value.recipient == recipient
@given(lists(elements=tuples(integers(), text(), text())))
def test_raise_smtp_recipients_refused(addresses: List[Tuple[int, str, str]]) -> None:
    """SMTPRecipientsRefused aggregates individual SMTPRecipientRefused errors."""
    errors = [SMTPRecipientRefused(*address) for address in addresses]
    with pytest.raises(SMTPRecipientsRefused) as excinfo:
        raise SMTPRecipientsRefused(errors)

    assert issubclass(excinfo.type, SMTPException)
    assert excinfo.value.recipients == errors
@given(error_message=text())
def test_raise_smtp_not_supported(error_message: str) -> None:
    """SMTPNotSupported subclasses SMTPException and stores the message."""
    with pytest.raises(SMTPNotSupported) as excinfo:
        raise SMTPNotSupported(error_message)

    assert issubclass(excinfo.type, SMTPException)
    assert excinfo.value.message == error_message
| mit | ebf9a77cb91aed2368386a92e04f9e6d | 31.932836 | 87 | 0.742579 | 3.940179 | false | true | false | false |
theonion/django-bulbs | bulbs/promotion/migrations/0002_content_list_to_pzone.py | 2 | 4862 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import json_field.fields
class Migration(migrations.Migration):
    """Replace the old ContentList/*Operation models with the PZone
    family: drops the legacy operation models, renames ContentList to
    PZone, and creates PZoneOperation (with Delete/Insert/Replace
    subclasses) plus PZoneHistory."""

    dependencies = [
        ('contenttypes', '0001_initial'),
        ('content', '0001_initial'),
        ('promotion', '0001_initial'),
    ]

    operations = [
        # cleanup old models
        migrations.DeleteModel(
            name='LockOperation',
        ),
        migrations.DeleteModel(
            name='UnlockOperation',
        ),
        migrations.DeleteModel(
            name='InsertOperation'
        ),
        migrations.DeleteModel(
            name='ReplaceOperation'
        ),
        migrations.DeleteModel(
            name='ContentListOperation'
        ),
        migrations.DeleteModel(
            name='ContentListHistory',
        ),

        # fix up content list which is now pzone
        migrations.RenameModel(
            old_name='ContentList',
            new_name='PZone',
        ),
        migrations.RenameField(
            model_name='pzone',
            old_name='length',
            new_name='zone_length',
        ),

        # pzone operation modifications
        migrations.CreateModel(
            name='PZoneOperation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('when', models.DateTimeField()),
                ('applied', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ['-when', 'id'],
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='pzoneoperation',
            name='content',
            field=models.ForeignKey(related_name='+', to='content.Content'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='pzoneoperation',
            name='polymorphic_ctype',
            field=models.ForeignKey(related_name='polymorphic_promotion.pzoneoperation_set', editable=False, to='contenttypes.ContentType', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='pzoneoperation',
            name='pzone',
            field=models.ForeignKey(related_name='operations', to='promotion.PZone'),
            preserve_default=True,
        ),

        # delete operation modifications
        migrations.CreateModel(
            name='DeleteOperation',
            fields=[
                ('pzoneoperation_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='promotion.PZoneOperation')),
            ],
            options={
                'abstract': False,
            },
            bases=('promotion.pzoneoperation',),
        ),

        # insert operation modifications
        migrations.CreateModel(
            name='InsertOperation',
            fields=[
                ('pzoneoperation_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='promotion.PZoneOperation')),
            ],
            options={
                'abstract': False,
            },
            bases=('promotion.pzoneoperation',),
        ),
        migrations.AddField(
            model_name='insertoperation',
            name='index',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),

        # replace operation modifications
        migrations.CreateModel(
            name='ReplaceOperation',
            fields=[
                ('pzoneoperation_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='promotion.PZoneOperation')),
            ],
            options={
                'abstract': False,
            },
            bases=('promotion.pzoneoperation',),
        ),
        migrations.AddField(
            model_name='replaceoperation',
            name='index',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),

        # pzone history modifications
        migrations.CreateModel(
            name='PZoneHistory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', json_field.fields.JSONField(default=[], help_text='Enter a valid JSON object')),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='pzonehistory',
            name='pzone',
            field=models.ForeignKey(related_name='history', to='promotion.PZone'),
            preserve_default=True,
        ),
    ]
| mit | eb463303f9febf14ebc0ad452d0a6696 | 32.07483 | 164 | 0.538256 | 4.881526 | false | false | false | false |
theonion/django-bulbs | bulbs/instant_articles/views.py | 1 | 4002 | from django.conf import settings
from django.template import RequestContext, loader
from django.template.base import TemplateDoesNotExist
from django.views.decorators.cache import cache_control
from bulbs.content.models import Content
from bulbs.content.views import BaseContentDetailView
from bulbs.feeds.views import RSSView
from bulbs.instant_articles.renderer import InstantArticleRenderer
from bulbs.instant_articles.transform import transform
class InstantArticleRSSView(RSSView):
    """RSS feed of content eligible for Facebook Instant Articles,
    embedding the fully rendered instant-article HTML in each item."""

    paginate_by = 100
    template_name = "feeds/instant_article_rss.xml"
    feed_title = "Instant Articles RSS Feed"

    def get_queryset(self):
        return Content.search_objects.instant_articles()

    def get_template_names(self):
        # Site-specific template first, library fallback second.
        return [
            "feeds/instant_article_rss.xml",
            "instant_article/base_instant_article_rss.xml"
        ]

    def get_context_data(self, *args, **kwargs):
        context = super(InstantArticleRSSView, self).get_context_data(*args, **kwargs)
        context["title"] = self.feed_title
        context["search_url"] = self.request.build_absolute_uri("/")

        for content in context["page_obj"].object_list:
            content.feed_url = self.request.build_absolute_uri(content.get_absolute_url())
            body = getattr(content, 'body', "")
            content_ctx = {
                "content": content,
                "absolute_uri": getattr(settings, 'WWW_URL'),
                "transformed_body": transform(body, InstantArticleRenderer())
            }
            try:
                content.instant_article_html = loader.render_to_string(
                    "instant_article/_instant_article.html", content_ctx
                )
            except TemplateDoesNotExist:
                # Fall back to the library-provided base template.
                content.instant_article_html = loader.render_to_string(
                    "instant_article/base_instant_article.html", content_ctx
                )

        return RequestContext(self.request, context)
class InstantArticleContentView(BaseContentDetailView):
    """Render a single piece of content as Instant Article markup."""

    # Instant-article URLs are canonical as-is; skip slug redirects.
    redirect_correct_path = False

    def get_template_names(self):
        return [
            "instant_article/_instant_article.html",
            "instant_article/base_instant_article.html"
        ]

    def get_context_data(self, *args, **kwargs):
        context = super(InstantArticleContentView, self).get_context_data(*args, **kwargs)
        targeting = self.object.get_targeting()
        body = getattr(self.object, 'body', "")
        context["transformed_body"] = transform(body, InstantArticleRenderer())
        context["targeting"] = targeting
        context["absolute_uri"] = getattr(settings, "WWW_URL")

        return context
class InstantArticleAdView(InstantArticleContentView):
    """Ad iframe for an instant article; the DFP slot position is passed
    through from the ``dfp_position`` query parameter."""

    def get_template_names(self):
        return [
            "instant_article/_instant_article_ad.html",
        ]

    def get_context_data(self, *args, **kwargs):
        context = super(InstantArticleAdView, self).get_context_data(*args, **kwargs)
        targeting = self.object.get_targeting()
        context["targeting"] = targeting
        context["targeting"]["dfp_position"] = self.request.GET.get("dfp_position", None)

        return context
class InstantArticleAnalyticsView(InstantArticleContentView):
    """Analytics beacon embedded in an instant article; fires a pageview
    attributed to the Instant Articles platform."""

    def get_template_names(self):
        return [
            "core/_analytics.html",
        ]

    def get_context_data(self, *args, **kwargs):
        context = super(InstantArticleAnalyticsView, self).get_context_data(*args, **kwargs)
        context["fire_pageview"] = True
        context["platform"] = "Instant Articles"
        context["path"] = self.request.GET.get("path", "")

        return context
instant_article_rss = cache_control(max_age=600)(InstantArticleRSSView.as_view())
instant_article = cache_control(max_age=600)(InstantArticleContentView.as_view())
instant_article_analytics = cache_control(max_age=600)(InstantArticleAnalyticsView.as_view())
instant_article_ad = cache_control(max_age=600)(InstantArticleAdView.as_view())
| mit | a579e89af5cf278ed3882a4f51aeaace | 37.480769 | 93 | 0.661419 | 3.889213 | false | false | false | false |
theonion/django-bulbs | bulbs/campaigns/views.py | 2 | 1973 | from datetime import datetime
from rest_framework import routers, viewsets, filters
from rest_framework.permissions import IsAdminUser
from .models import Campaign
from .serializers import CampaignSerializer
from bulbs.utils.methods import get_query_params
class CampaignActiveFilter(filters.BaseFilterBackend):
    """Checks for a value for 'active' in query parameters, filters from this
    based on start_date and end_date. 'active' as True will return campaigns
    where start_date <= now < end_date and 'active' as False will return campaigns
    where now < start_date || now >= end_date."""

    def filter_queryset(self, request, queryset, view):
        params = get_query_params(request)
        if 'active' not in params:
            # No 'active' parameter: leave the queryset untouched.
            return queryset
        # NOTE(review): naive datetime — presumably the project runs with
        # USE_TZ off; confirm against settings.
        now = datetime.now()
        flag = params['active'].lower()
        if flag == 'true':
            # Currently running: start_date <= now < end_date
            return queryset.filter(start_date__lte=now, end_date__gt=now)
        if flag == 'false':
            # Not yet started, or already ended:
            # now < start_date || now >= end_date
            return queryset.filter(start_date__gt=now) | queryset.filter(end_date__lte=now)
        # Unrecognized value: no filtering applied.
        return queryset
class CampaignViewSet(viewsets.ModelViewSet):
    """Admin-only CRUD endpoint for Campaign records, supporting search,
    ordering, and the custom active-window filter."""
    queryset = Campaign.objects.all()
    serializer_class = CampaignSerializer
    # DRF page size for list responses.
    paginate_by = 10
    filter_backends = (
        CampaignActiveFilter,
        filters.SearchFilter,
        filters.OrderingFilter,)
    search_fields = (
        "campaign_label",
        "sponsor_name",)
    ordering_fields = (
        "campaign_label",
        "sponsor_name",
        "start_date",
        "end_date",)
    # Only staff users may read or modify campaigns.
    permission_classes = [IsAdminUser]
# DRF router exposing the campaign viewset under the "campaign" prefix.
api_v1_router = routers.DefaultRouter()
api_v1_router.register(r"campaign", CampaignViewSet, base_name="campaign")
| mit | ba57a75091511b7b84230c404a2f0b4d | 30.822581 | 82 | 0.618855 | 4.01833 | false | false | false | false |
theonion/django-bulbs | bulbs/poll/views.py | 1 | 1646 | import json
from django.http import HttpResponse
from django.views.decorators.cache import cache_control
from django.views.generic.detail import DetailView
from bulbs.content.views import BaseContentDetailView
from bulbs.poll.models import Poll
from bulbs.poll.serializers import PollPublicSerializer
class PollDetailView(BaseContentDetailView):
    """Public detail page for a single Poll."""
    model = Poll
    ordering_fields = "__all__"
class MergedPollDataView(DetailView):
    """Serves a poll's merged data as JSON with permissive CORS headers."""
    model = Poll

    def render_to_response(self, context, **response_kwargs):
        """
        This endpoint sets very promiscuous CORS headers.
        Access-Control-Allow-Origin echoes the request Origin, so a page from
        ANY domain may call this endpoint, and
        Access-Control-Allow-Credentials is set to true so poll data can be
        requested in authenticated test/staff environments.
        That combination makes this endpoint a potential CSRF target:
        it MUST NOT write data and MUST NOT return any sensitive data.
        """
        payload = json.dumps(PollPublicSerializer(self.object).data)
        response = HttpResponse(payload, content_type="application/json")
        origin = self.request.META.get("HTTP_ORIGIN")
        if origin is not None:
            # Echo the caller's origin and allow credentialed requests.
            response["Access-Control-Allow-Origin"] = origin
            response["Access-Control-Allow-Credentials"] = 'true'
        return response
# URL-conf entry points: both poll endpoints carry a ten-minute cache header.
poll_detail = cache_control(max_age=600)(PollDetailView.as_view())
get_merged_poll_data = cache_control(max_age=600)(MergedPollDataView.as_view())
| mit | 01302cb4f91da4c2466fe091f669f6eb | 34.021277 | 86 | 0.705954 | 4.354497 | false | false | false | false |
theonion/django-bulbs | bulbs/special_coverage/views.py | 1 | 4492 | from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.cache import add_never_cache_headers
from django.views.decorators.cache import cache_control
from bulbs.content.views import BaseContentDetailView
from bulbs.special_coverage.models import SpecialCoverage
from bulbs.utils.methods import redirect_unpublished_to_login_or_404
class SpecialCoverageView(BaseContentDetailView):
    """Landing page for a SpecialCoverage: renders its content list, first
    video, and DFP ad targeting."""

    # Serve at the requested path; do not redirect to the canonical URL.
    redirect_correct_path = False

    def get_template_names(self):
        template_names = ["special_coverage/landing.html"]
        # A configured custom template takes priority over the default.
        template_names.insert(0, getattr(self.special_coverage, "custom_template_name", ""))
        return template_names

    def get(self, request, *args, **kwargs):
        response = super(SpecialCoverageView, self).get(request, *args, **kwargs)
        # Extra unpublished check on Special Coverage activation (BaseContentDetailView only checks
        # first piece of content).
        if not self.special_coverage.is_active:
            if self.show_published_only():
                raise Http404("Special Coverage does not exist.")
            elif not request.user.is_staff:
                # Non-staff previewers are bounced to login (or 404).
                return redirect_unpublished_to_login_or_404(request=request,
                                                            next_url=request.get_full_path())
        # Never cache unpublished content
        # NOTE(review): this runs unconditionally, so every special-coverage
        # response is marked never-cache even though the module-level wrappers
        # below set max_age=600 — confirm this is intended.
        add_never_cache_headers(response)
        return response

    def get_object(self, *args, **kwargs):
        # Resolve the coverage by slug; the detail view's "object" is the
        # first piece of content in the coverage's list.
        self.special_coverage = get_object_or_404(SpecialCoverage, slug=self.kwargs.get("slug"))
        qs = self.special_coverage.get_content(published=self.show_published_only()).full()
        if qs.count() == 0:
            raise Http404("No Content available in content list")
        return qs[0]

    def get_context_data(self, *args, **kwargs):
        context = super(SpecialCoverageView, self).get_context_data()
        per_page = 10
        context["per_page"] = per_page
        # Cap the list at 100 items; only the first page renders server-side,
        # the rest loads via SpecialCoverageLoadMoreView.
        content_list = self.special_coverage.get_content(
            published=self.show_published_only()
        )[:100]
        context["content_list_total"] = len(content_list)
        context["content_list"] = content_list[:per_page]
        if len(content_list) > per_page:
            context["more_content"] = True
        else:
            context["more_content"] = False
        if hasattr(self.object, "get_reading_list"):
            context["reading_list"] = self.object.get_reading_list()
        context["special_coverage"] = self.special_coverage
        context["targeting"] = {}
        try:
            # Default to the coverage's first video, when one exists.
            context["current_video"] = self.special_coverage.videos[0]
        except IndexError:
            context["current_video"] = None
        if self.special_coverage:
            context["targeting"]["dfp_specialcoverage"] = self.special_coverage.slug
            if self.special_coverage.tunic_campaign_id:
                context["targeting"]["dfp_campaign_id"] = self.special_coverage.tunic_campaign_id
        return context

    def show_published_only(self):
        """
        Returns True if `full_preview` is not a query_parameter.
        Used to determine unpublished preview state.
        """
        return bool("full_preview" not in self.request.GET)
class SpecialCoverageLoadMoreView(SpecialCoverageView):
    """Returns the next window of special-coverage content as an HTML fragment."""

    def get_template_names(self, *args, **kwargs):
        # Fragment-only template, rendered into the landing page via AJAX.
        return ["special_coverage/more.html"]

    def get_context_data(self, *args, **kwargs):
        context = super(SpecialCoverageLoadMoreView, self).get_context_data()
        # Slice the next ten items starting at the offset from the URL.
        start = int(self.kwargs.get("offset"))
        stop = start + 10
        content = self.special_coverage.get_content(published=self.show_published_only())
        context["content_list"] = content[start:stop]
        return context
class SpecialCoverageVideoView(SpecialCoverageView):
    """Special-coverage landing page focused on one of its videos."""

    def get_context_data(self, *args, **kwargs):
        context = super(SpecialCoverageVideoView, self).get_context_data()
        requested = int(self.kwargs.get('video_id'))
        # Only videos that actually belong to this coverage may be selected.
        if requested not in self.special_coverage.videos:
            raise Http404('Video with id={} not in SpecialCoverage'.format(requested))
        context['current_video'] = requested
        return context
# URL-conf entry points: special-coverage views carry a ten-minute cache header.
special_coverage = cache_control(max_age=600)(SpecialCoverageView.as_view())
special_coverage_load_more = cache_control(max_age=600)(SpecialCoverageLoadMoreView.as_view())
special_coverage_video = cache_control(max_age=600)(SpecialCoverageVideoView.as_view())
| mit | 6e256d1e08957c1062e64a6abcaa170d | 36.747899 | 99 | 0.653384 | 4.0287 | false | false | false | false |
theonion/django-bulbs | bulbs/content/templatetags/content.py | 1 | 1109 | try:
from urllib.parse import urljoin, urlencode
except ImportError:
from urlparse import urljoin
from urllib import urlencode
from django import template
from django.conf import settings
# Tag registry that Django's template loader discovers for this module.
register = template.Library()
@register.simple_tag
def content_tunic_campaign_url(campaign_id,
                               image_ratio=None,
                               image_width=None,
                               image_format=None):
    """Build the absolute Tunic API URL for a campaign's public endpoint.

    Only image parameters that are truthy are included in the query string.
    Simplified from the original: the ``"{}".format(urlencode(...))`` wrapper
    added nothing, so the query string is built in one step. A trailing "?"
    is still emitted when no parameters are given, preserving the historical
    URL shape.
    """
    raw_params = {}
    if image_ratio:
        raw_params["image_ratio"] = image_ratio
    if image_width:
        raw_params["image_width"] = image_width
    if image_format:
        raw_params["image_format"] = image_format
    path = urljoin(
        settings.TUNIC_API_PATH,
        "campaign/{}/public?{}".format(campaign_id, urlencode(raw_params))
    )
    return urljoin(settings.TUNIC_BACKEND_ROOT, path)
@register.simple_tag(takes_context=True)
def build_video_share_uri(context, video_id):
    """Return an absolute share URL ("/v/<video_id>") for the given video."""
    share_path = "/v/" + format(video_id)
    return context["request"].build_absolute_uri(share_path)
theonion/django-bulbs | tests/feeds/test_rss_feeds.py | 1 | 4908 | from datetime import timedelta
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.utils import timezone
from bulbs.special_coverage.models import SpecialCoverage
from bulbs.super_features.models import BaseSuperFeature, GUIDE_TO_HOMEPAGE
from bulbs.utils.test import BaseIndexableTestCase, make_content
from example.testcontent.models import TestContentObj
class RSSTestCase(BaseIndexableTestCase):
    """A base test case, allowing tearDown and setUp of the ES index"""

    def test_rss_feed(self):
        # The feed should return 200 and annotate each item with a feed_url.
        # Let's bust out some content
        make_content(TestContentObj, published=timezone.now() - timedelta(hours=2), _quantity=35)
        TestContentObj.search_objects.refresh()
        rss_endpoint = reverse("rss-feed")
        client = Client()
        response = client.get(rss_endpoint)
        self.assertEqual(response.status_code, 200)
        first_item = response.context["page_obj"].object_list[0]
        self.assertTrue(hasattr(first_item, "feed_url"))

    def test_rss_pagination(self):
        # With 40 items and a page size of 20, pages 1 and 2 should each hold
        # 20 items with no overlap between them.
        endpoint = reverse("rss-feed")
        for i in range(40):
            TestContentObj.objects.create(
                title='Content #{}'.format(i),
                published=timezone.now() - timezone.timedelta(days=i)
            )
        TestContentObj.search_objects.refresh()
        client = Client()
        resp = client.get(endpoint)
        self.assertEqual(resp.status_code, 200)
        page1_content_list = resp.context_data.get('content_list')
        self.assertEqual(len(page1_content_list), 20)
        resp = client.get(endpoint, {'page': 2})
        self.assertEqual(resp.status_code, 200)
        page2_content_list = resp.context_data.get('content_list')
        self.assertEqual(len(page2_content_list), 20)
        for content in page1_content_list:
            self.assertNotIn(content, page2_content_list)

    def test_exclude_superfeatures(self):
        # Published superfeatures must never appear in the RSS feed.
        BaseSuperFeature.objects.create(title="Guide to Cats",
                                        superfeature_type=GUIDE_TO_HOMEPAGE,
                                        published=timezone.now())
        BaseSuperFeature.search_objects.refresh()
        resp = Client().get(reverse("rss-feed"))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(0, len(resp.context["page_obj"].object_list))

    def test_hidden_content(self):
        # Content flagged hide_from_rss is omitted; other content remains.
        TestContentObj.objects.create(
            title="Content1",
            published=timezone.now(),
            hide_from_rss=True
        )
        TestContentObj.objects.create(title="Content2", published=timezone.now())
        TestContentObj.search_objects.refresh()
        resp = Client().get(reverse("rss-feed"))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.context["page_obj"].object_list), 1)
        self.assertEqual(resp.context["page_obj"].object_list[0].title, "Content2")

    def test_special_coverage_rss_feed(self):
        # The special-coverage feed accepts either a slug or an id query
        # parameter and returns only that coverage's included content;
        # with neither parameter it returns an empty list.
        # make content
        c1 = TestContentObj.objects.create(title="Content1", published=timezone.now())
        c2 = TestContentObj.objects.create(title="Content2", published=timezone.now())
        TestContentObj.objects.create(title="Content3", published=timezone.now())
        TestContentObj.search_objects.refresh()
        # make Special Coverage & add c1 & c2 to it
        sc = SpecialCoverage.objects.create(
            name="Coverage",
            description="Test coverage",
            query={
                'excluded_ids': [],
                'groups': [],
                'included_ids': [c1.pk, c2.pk],
                'pinned_ids': []
            }
        )
        # test slug w/ sc-rss-feed
        sc_rss = reverse("sc-rss-feed")
        client = Client()
        response = client.get("{0}?special_coverage_slug={1}".format(sc_rss, sc.slug))
        self.assertEqual(response.status_code, 200)
        # verify stuff is in sc-rss-feed response
        self.assertTrue("Content1" in response.content.decode('utf-8'))
        self.assertTrue("Content2" in response.content.decode('utf-8'))
        self.assertTrue("Content3" not in response.content.decode('utf-8'))
        # test id w/ sc-rss-feed
        response = client.get("{0}?special_coverage_id={1}".format(sc_rss, sc.id))
        self.assertEqual(response.status_code, 200)
        # verify stuff is in sc-rss-feed response
        self.assertTrue("Content1" in response.content.decode('utf-8'))
        self.assertTrue("Content2" in response.content.decode('utf-8'))
        self.assertTrue("Content3" not in response.content.decode('utf-8'))
        # test w/o id or slug
        response = client.get("{0}".format(sc_rss))
        self.assertEqual(response.status_code, 200)
        # verify nothing is returned
        object_list = response.context["page_obj"].object_list
        self.assertEqual(len(object_list), 0)
| mit | c85337e8ab66fede0d2db2df0aec8f38 | 37.645669 | 97 | 0.632233 | 3.939005 | false | true | false | false |
theonion/django-bulbs | bulbs/contributions/tasks.py | 1 | 1228 | """celery tasks for contributions."""
from celery import shared_task
from bulbs.content.models import Content
from .models import Contribution, FreelanceProfile
@shared_task(default_retry_delay=5)
def check_and_update_freelanceprofiles(content_id):
    """Ensure every author of the given content has a FreelanceProfile."""
    content = Content.objects.get(id=content_id)
    for author in content.authors.all():
        # Only create a profile for authors who do not already have one.
        if getattr(author, "freelanceprofile", None) is None:
            FreelanceProfile.objects.create(contributor=author)
@shared_task(default_retry_delay=5)
def update_role_rates(contributor_role_pk):
    """Re-index every contribution tied to the given contributor role."""
    contributions = Contribution.objects.filter(role__pk=contributor_role_pk)
    for contribution in contributions:
        contribution.index()
@shared_task(default_retry_delay=5)
def run_contributor_email_report(**kwargs):
    """Build and send the mass contributor email report."""
    # Imported lazily to avoid a circular import at module load time.
    from .email import EmailReport
    EmailReport(**kwargs).send_mass_contributor_emails()
@shared_task(default_retry_delay=5)
def check_and_run_send_byline_email(content_id, new_byline):
    """Email any authors who were dropped from the content's byline."""
    # Imported lazily to avoid a circular import at module load time.
    from .email import send_byline_email
    content = Content.objects.get(id=content_id)
    kept_ids = [contributor.id for contributor in new_byline]
    removed_bylines = content.authors.exclude(pk__in=kept_ids)
    if removed_bylines:
        send_byline_email(content, removed_bylines)
| mit | dac6fa361e6dc0c02f4fc31adbbff24e | 33.111111 | 82 | 0.735342 | 3.439776 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.