text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import lru_cache
from collections import deque
from dataclasses import dataclass, field
import heapq
@dataclass(frozen=True)
class Point:
    """Immutable 2-D grid coordinate (hashable, so usable as a dict key)."""
    x: int
    y: int

    def dist(self, other):
        """Manhattan (taxicab) distance to *other*."""
        dx = self.x - other.x
        dy = self.y - other.y
        return abs(dx) + abs(dy)
class PriorityQueue:
    """Min-heap of items ordered by a caller-supplied utility function."""

    @dataclass(frozen=True, order=True)
    class QueuedItem:
        # Heap entries order on utility only; the payload never compares.
        utility: int
        item: object = field(compare=False)

    def __init__(self, utility_func):
        self._utility_func = utility_func
        self._heap = []

    def __bool__(self):
        # Truthy while any item remains queued.
        return len(self._heap) > 0

    def push(self, item):
        """Insert *item*, keyed by utility_func(item)."""
        entry = self.QueuedItem(self._utility_func(item), item)
        heapq.heappush(self._heap, entry)

    def pop(self):
        """Remove and return the item with the smallest utility."""
        return heapq.heappop(self._heap).item
@lru_cache()
def is_wall(p, seed):
    """Return 1 if grid point *p* is a wall for this *seed*, else 0.

    AoC 2016 day 13 rule: a point is a wall when the number of set bits
    in x*x + 3x + 2xy + y + y*y + seed is odd.
    """
    v = p.x*p.x + 3*p.x + 2*p.x*p.y + p.y + p.y*p.y
    #v = x*(x+3+2*y) + y*(1+y)
    v += seed
    # Popcount parity via bin() instead of a manual shift loop.
    return bin(v).count("1") & 1
def print_map(seed, w, h, visited=None):
    """Print a w x h ASCII map: '#' wall, '.' open, 'O' visited."""
    from itertools import cycle
    visited = visited if visited is not None else {}

    def glyph(pt):
        if pt in visited:
            return "O"
        if is_wall(pt, seed):
            return "#"
        return "."

    digits = cycle('0123456789')
    # Column ruler, then one row per y with a row ruler digit.
    print(' ' + ''.join(next(digits) for _ in range(w)))
    for y in range(h):
        print(str(y % 10) + ' ' + ''.join(glyph(Point(x, y)) for x in range(w)))
# Unit steps for the four orthogonal neighbours (right, down, left, up).
delta = (
    (1, 0),
    (0, 1),
    (-1, 0),
    (0, -1),
)
def moves(p, seed):
    """Yield the open, in-bounds orthogonal neighbours of *p*."""
    for dx, dy in delta:
        candidate = Point(p.x + dx, p.y + dy)
        # The maze only occupies non-negative coordinates.
        if candidate.x < 0 or candidate.y < 0:
            continue
        if not is_wall(candidate, seed):
            yield candidate
def solve_p1(seed, target):
    """Return the number of steps taken from (1, 1) to *target*.

    NOTE(review): the frontier is ordered purely by the heuristic
    (Manhattan distance to target) with no path-cost term, i.e. greedy
    best-first rather than BFS/A* - confirm this still yields the
    shortest path for the puzzle input.
    """
    queue = PriorityQueue(target.dist)
    queue.push(Point(1, 1))
    # Strategy: bfs but sort what goes on the search stack by distance to target
    visited = {
        Point(1, 1): 0  # point -> steps recorded when first enqueued
    }
    while queue:
        p = queue.pop()
        if p == target:
            return visited[p]
        for m in moves(p, seed):
            if m not in visited:
                queue.push(m)
                visited[m] = visited[p] + 1
def solve_p2(seed):
    """Count distinct points reachable from (1, 1) in at most 50 steps.

    NOTE(review): the frontier is keyed on -distance-from-start, so the
    farthest frontier point pops first; step counts are recorded at
    enqueue time and are only shortest-path counts if expansion order
    approximates BFS - confirm against a plain BFS.
    """
    print_map(seed, 50, 50)
    start = Point(1, 1)
    queue = PriorityQueue(lambda i: -start.dist(i))
    queue.push(start)
    # Strategy: bfs but sort what goes on the search stack by distance to target
    visited = {
        start: 0
    }
    while queue:
        p = queue.pop()
        if visited[p] > 49:
            # Already 50 steps out; count it but don't expand further.
            continue
        for m in moves(p, seed):
            if m not in visited:
                queue.push(m)
                visited[m] = visited[p] + 1
    print_map(seed, 50, 50, visited)
    return len(visited)
if __name__ == '__main__':
    # Puzzle input: seed 1364; part-1 target is (31, 39).
    print("Part 1 Solution")
    target = Point(31, 39)
    print(solve_p1(1364, target))
    print("Part 2 Solution")
    print(solve_p2(1364))
| {
"repo_name": "suprzer0/aoc2016",
"path": "day13/solve.py",
"copies": "1",
"size": "3000",
"license": "mit",
"hash": -926780763725494900,
"line_mean": 20.7391304348,
"line_max": 80,
"alpha_frac": 0.5243333333,
"autogenerated": false,
"ratio": 3.239740820734341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42640741540343413,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from contextlib import contextmanager
import os
import glob
from hashlib import sha1
from scipy import sparse
import numpy as np
from scipy.integrate import simps
from scipy.ndimage import spline_filter
from astropy.table import Table
from astropy.coordinates import ICRS
import astropy.units as u
# do this first, since SdfError called in config
# TODO: presumably there is a way to avoid this...
class SdfError(Exception):
    """Package-specific exception for non-standard error conditions.

    Defined before the package imports below because config raises it.
    """
    pass
from . import photometry
from . import config as cfg
c_micron = u.micron.to(u.Hz,equivalencies=u.spectral())
@contextmanager
def pushd(new_dir):
    """Temporarily chdir into *new_dir*, like the shell's ``pushd``.

    Yields the previous working directory and always restores it on
    exit, even if the body raises.
    From https://gist.github.com/theY4Kman/3583442
    """
    previous = os.getcwd()
    os.chdir(new_dir)
    try:
        yield previous
    finally:
        os.chdir(previous)
def bnu_wav_micron(wav_um, temp):
    """Return a Planck function B_nu, avoiding exp() overflows.

    Parameters
    ----------
    wav_um : float or numpy.ndarray
        Wavelength(s) in micron.
    temp : float
        Temperature in Kelvin.

    Returns
    -------
    float or numpy.ndarray
        B_nu; entries whose exponent would overflow are set to cfg.tiny.
    """
    # NOTE(review): Planck-function constants, presumably in micron/K
    # units - verify against the derivation.
    k1 = 3.9728949e19
    k2 = 14387.69
    fact1 = k1/(wav_um**3)
    fact2 = k2/(wav_um*temp)
    if isinstance(wav_um, np.ndarray):
        # exp(709) is the largest float64-safe exponent (~1e308).
        ofl = fact2 < 709
        bnu = np.zeros(len(wav_um)) + cfg.tiny
        # Idiom fix: was `if np.any(ofl) == False`.
        if not np.any(ofl):
            return bnu
        bnu[ofl] = fact1[ofl]/(np.exp(fact2[ofl])-1.0)
        return bnu
    else:
        if fact2 > 709:
            return cfg.tiny
        return fact1/(np.exp(fact2)-1.0)
def bnu_nu_hz(nu_hz, temp):
    """Planck function at frequency *nu_hz* (Hz), via bnu_wav_micron."""
    wav_um = c_micron / nu_hz
    return bnu_wav_micron(wav_um,temp)
def sdf_int(y, x):
    """Integrate *y* over *x* - the single place to swap integration schemes."""
    # Trapezoidal rule; Simpson's rule was tried and left as an alternative.
    return np.trapz(y, x)
    # return simps(y,x)
def validate_1d(value, expected_len, dtype=float):
    """Validate a 1-d sequence, returning it as an ndarray of *dtype*.

    None passes through unchanged; lists/tuples are converted first.
    Raises SdfError on a length mismatch, TypeError otherwise.
    """
    if type(value) in (list, tuple):
        value = np.array(value, dtype=dtype)
    if value is None:
        return None
    if isinstance(value, np.ndarray) and value.ndim == 1:
        if expected_len is not None and len(value) != expected_len:
            raise SdfError("incorrect length (expected {0} but found {1})".format(expected_len, len(value)))
        if value.dtype != dtype:
            value = np.array(value, dtype=dtype)
        return value
    raise TypeError("should be a 1-d sequence")
def validate_nd(value, expected_dim, dtype=float):
    """Validate an n-d ndarray of *expected_dim* dimensions and *dtype*.

    None passes through; raises SdfError on a dimension mismatch and
    TypeError if *value* is not an ndarray.
    """
    if value is None:
        return None
    if isinstance(value, np.ndarray):
        if expected_dim is not None and value.ndim != expected_dim:
            raise SdfError("incorrect dimension (expected {0} but found {1})".format(expected_dim, value.ndim))
        if value.dtype != dtype:
            value = np.array(value, dtype=dtype)
        return value
    raise TypeError("should be an n-d ndarray")
def validate_string(value):
    """Return *value* if it is a str or None, else raise TypeError."""
    if value is None or isinstance(value, str):
        return value
    raise TypeError("should be a string")
def validate_dict(value):
    """Return *value* if it is a dict or None, else raise TypeError."""
    if value is None or isinstance(value, dict):
        return value
    raise TypeError("should be a dict")
def validate_float(value):
    """Return *value* if it is a float or None, else raise TypeError.

    Note: ints are rejected; the check is strictly isinstance(float).
    """
    if value is None or isinstance(value, float):
        return value
    raise TypeError("{} should be a float".format(value))
def validate_function(value):
    """Return *value* if it is callable or None, else raise TypeError."""
    if value is None or callable(value):
        return value
    raise TypeError("should be a function")
def validate_quantity(value):
    """Return *value* if it is an astropy Quantity or None, else raise."""
    if value is None or isinstance(value, u.Quantity):
        return value
    raise TypeError("should be an astropy units Quantity")
def resample_matrix(wave_in,new_wave,old_R=np.inf,kern_width=5):
    """Return a resampling/convolution kernel.

    Copied from Andy Casey's sick code, modified to ensure the range of
    indices included near each resampled wavelength are appropriate
    (rather than a single value for the whole spectrum).

    The new wavelength grid is assumed (and checked) to be at least
    close to uniform in resolution over the whole range.

    Parameters
    ----------
    wave_in : numpy.ndarray
        Wavelengths of the spectrum to be resampled.
    new_wave : numpy.ndarray
        Wavelengths to resample to.
    old_R : int, optional
        Resolution of the spectrum to be resampled.
    kern_width : int, optional
        Width (units of sigma) of convolution kernel at each point.

    Returns
    -------
    kernel : 2d numpy.ndarray (or equivalent)
        Convolution kernel, multiply the spectrum by this to convolve.

    See Also
    --------
    spectrum.resample
    """
    # this will be the shape of the convolution matrix
    N, M = (new_wave.size, wave_in.size)
    # figure out what the desired median resolution is, checking the
    # range isn't too large (excluding the last point)
    dlambda = np.diff(new_wave)
    lambdas = (new_wave[1:] + new_wave[:-1])/2.0
    new_Rs = lambdas / dlambda
    if 2 * np.min(new_Rs[:-1]) < np.max(new_Rs[:-1]):
        raise SdfError("wavelength grid has too wide a range of resolutions "
                       " to be resampled this way ({} to {}) {}".
                       format(np.min(new_Rs),np.max(new_Rs),new_Rs))
    new_R = np.median(new_Rs)
    # width of the kernel (dlambda=lambda/R) at each point
    fwhms = new_wave / float(new_R)
    if np.isfinite(old_R):
        # resampling can only degrade resolution; subtract the width
        # already present in the input spectrum
        assert old_R > new_R
        fwhms -= new_wave / float(old_R)
    # 2.355 ~= 2 * sqrt(2*log(2))
    sigmas = fwhms/2.3548200450309493
    # approx center indices for new wavs in old
    ioff = wave_in.searchsorted(new_wave)
    ioff = np.clip(ioff,0,M-1)
    # index ranges covered by convolution at a given point
    dlambda = np.diff(wave_in)
    dlambda = np.append(dlambda,dlambda[-1])
    dlambda = dlambda[ioff]
    nkern = np.ceil(kern_width * sigmas / dlambda).astype(int)
    # For +/- N_kernel_pixels at each point, calculate the kernel
    # and retain the indices.
    x_indices = np.array([],dtype=int)
    pdf = np.array([],dtype=int)
    for i in range(N):
        xtmp = np.arange(ioff[i] - nkern[i],ioff[i] + nkern[i],1)
        xtmp = np.clip(xtmp,0,M-1)
        x_indices = np.append(x_indices,xtmp)
        # Gaussian weights around the i'th new wavelength
        pdftmp = np.exp(-(wave_in[xtmp] - new_wave[i])**2/(2.*sigmas[i]**2))
        # die if no weights in kernel, arises from new wavelength grid
        # point having no nearby points in old grid
        if pdftmp.sum() == 0.0:
            raise SdfError("{} {} {} {} {} {} {} {} {}".format(i,N,ioff[i],nkern[i],pdftmp,xtmp,wave_in[xtmp],new_wave[i],sigmas[i]))
        pdftmp /= pdftmp.sum()
        pdf = np.append(pdf,pdftmp)
    # row index for each retained kernel entry (2*nkern[i] per row)
    y_indices = np.repeat(np.arange(N), 2 * nkern)
    return sparse.coo_matrix((pdf, (x_indices, y_indices)), shape=(M, N))
def get_sdb_keywords(file):
    """Return the keyword dict from an sdb IPAC-format table file.

    Keywords whose stored value is the string 'None' become None.
    """
    t = Table.read(file, format='ascii.ipac')
    kw = {}
    for key, entry in t.meta['keywords'].items():
        value = entry['value']
        kw[key] = None if value == 'None' else value
    return kw
def rawphot_path(sdbid, allow_private=False):
    """Resolve the path of an sdbid's raw photometry file.

    Parameters
    ----------
    sdbid : str
        The sdbid for which the file path is desired.
    allow_private : bool, optional
        Allow a path with private photometry to be returned.

    Returns None when no matching file exists.
    """
    root = cfg.file['sdb_root'] + 'masters/' + sdbid + '/'
    # Prefer a public copy when one exists.
    public = root + 'public/' + sdbid + '-rawphot.txt'
    if os.path.exists(public):
        return public
    if allow_private:
        matches = glob.glob(root + '*/' + sdbid + '-rawphot.txt')
        if matches:
            return matches[0]
    return None
def uvby_convert(by, m1, c1):
    """Convert Stromgren photometry according to Bessell 2011.

    The coefficients convert synthetic photometry to observed,
    I_std = a_0 + a_1 * I_syn, so they are inverted here because we want
    observed photometry to agree with synthetic. Two coefficient sets
    are used, split at b-y = 0.5.
    """
    # coefficient pairs (a_0, a_1) from Table 2
    blue = {'by': (-0.007, 0.997), 'm1': (0.005, 0.963), 'c1': (-0.016, 0.994)}
    red = {'by': (0.004, 0.979), 'm1': (0.011, 0.951), 'c1': (-0.003, 1.018)}
    coeffs = blue if by < 0.5 else red

    def invert(value, key):
        a0, a1 = coeffs[key]
        return (value - a0) / a1

    return invert(by, 'by'), invert(m1, 'm1'), invert(c1, 'c1')
def plot_err(a, e_a, b, e_b):
    """Return x,y arrays to use as lines for error bars.

    Each point yields a degenerate (x, x)/(y, y) pair plus the error-bar
    extent; lower limits that would go non-positive are clamped to
    cfg.tiny (useful on log axes).
    """
    c, d = [], []
    err_c, err_d = [], []
    for x, xerr, y, yerr in zip(a, e_a, b, e_b):
        c.append((x, x))
        d.append((y, y))
        x_lo = x - xerr if x - xerr > 0 else cfg.tiny
        err_c.append((x_lo, x + xerr))
        y_lo = y - yerr if y - yerr > 0 else cfg.tiny
        err_d.append((y_lo, y + yerr))
    return c, err_c, d, err_d
def plot_join_line(data, dup_col, x_col, y_col):
    """Return x,y arrays joining duplicated entries in a data dictionary.

    Parameters
    ----------
    data : dict
        Data dictionary.
    dup_col : str
        Name of key to find duplicates in.
    x_col : str
        Name of column to extract x data from.
    y_col : str
        Name of column to extract y data from.

    Returns (None, None) when no duplicates exist at all.
    """
    uniq_ids = np.unique(data[dup_col])
    if len(uniq_ids) == len(data[dup_col]):
        return None, None
    xs, ys = [], []
    for uid in uniq_ids:
        idx = np.where(data[dup_col] == uid)[0]
        if len(idx) > 1:
            xs.append(data[x_col][idx].tolist())
            ys.append(data[y_col][idx].tolist())
    return xs, ys
def linterp(newx, x, y):
    """Linearly interpolate y(newx), with *x* a sorted ndarray."""
    hi = x.searchsorted(newx)
    lo = hi - 1
    frac = (newx - x[lo]) / (x[hi] - x[lo])
    return y[lo] + (y[hi] - y[lo]) * frac
def rnd1sf(x):
    """Round numbers to 1 s.f. (based on first element if more than 1).

    Returns zeros of the same length if any element is non-finite or
    the minimum is non-positive.
    """
    # TODO: make this neater
    # give zeros if anything is wrong
    if not np.all(np.isfinite(x)) or np.min(x) <= 0:
        return np.zeros(len(x))
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    ndigits = -int(np.floor(np.log10(np.abs(x[0]))))
    return np.round(x, ndigits)
def iau_coord(ra, dec):
    """Return an IAU-style coordinate string.

    Parameters
    ----------
    ra : float
        Right ascension in degrees.
    dec : float
        Declination in degrees.
    """
    coord = ICRS(ra * u.degree, dec * u.degree)
    ra_str = coord.ra.to_string(unit=u.hourangle, sep='', precision=2, pad=True)
    dec_str = coord.dec.to_string(sep='', precision=2, alwayssign=True, pad=True)
    return 'J{0}{1}'.format(ra_str, dec_str)
@lru_cache(maxsize=16)
def spline_filter_mem(arr,order=None):
    """Filter array, memoizing result.

    Is passed an array using the hashable wrapper below, because plain
    ndarrays are unhashable and cannot be lru_cache keys.
    """
    return spline_filter(arr.unwrap(),order=order)
class hashable(object):
    """Hashable wrapper for ndarray objects.

    ndarrays are mutable and therefore unhashable by design, so they
    cannot be dict keys or set members. This wrapper hashes the array's
    raw bytes once at construction. With tight=True the input is copied
    on the way in (and out), which is safer; otherwise the caller must
    take care never to mutate the wrapped array.

    From https://gist.github.com/marquisthunder/9ef974477e1aef9dbb41
    Modified to compare the cached hashes, rather than the whole array,
    for __eq__.
    """

    def __init__(self, wrapped, tight=False):
        """Wrap *wrapped*, copying it when *tight* is True."""
        self.__tight = tight
        self.__wrapped = np.array(wrapped) if tight else wrapped
        # SHA1 of the array's bytes, computed once up front.
        self.__hash = int(sha1(wrapped.view(np.uint8)).hexdigest(), 16)

    def __eq__(self, other):
        # Hash comparison only -- see class docstring.
        return self.__hash__() == other.__hash__()

    def __hash__(self):
        return self.__hash

    def unwrap(self):
        """Return the encapsulated ndarray (a copy when tight=True)."""
        return np.array(self.__wrapped) if self.__tight else self.__wrapped
def get_herschel_obsid(obs):
    """Return unique Herschel ObsIDs (if any) given photometry objects.

    Looks through PACS filters whose bibcode contains 'HSA_GMK' and
    collects the corresponding note entry.
    """
    obsids = np.array([])
    for p in obs:
        if not isinstance(p, photometry.Photometry):
            continue
        for i, filt in enumerate(p.filters):
            if filt in ('PACS70', 'PACS100', 'PACS160') and 'HSA_GMK' in p.bibcode[i]:
                obsids = np.append(obsids, p.note[i])
    return np.unique(obsids)
| {
"repo_name": "drgmk/sdf",
"path": "sdf/utils.py",
"copies": "1",
"size": "13582",
"license": "mit",
"hash": -3233368995377309000,
"line_mean": 28.1459227468,
"line_max": 133,
"alpha_frac": 0.5853335297,
"autogenerated": false,
"ratio": 3.4263370332996974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4511670562999697,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from faker import Faker
from jinja2 import Template
class StringGenerator:
    """Wrap a function so that interpolating the object calls it.

    Sometimes in templates you want a reference to a variable to call a
    function each time it is rendered:

    >>> x = template_utils.StringGenerator(datetime.today().isoformat)
    >>> print(f"{x}")
    2019-09-23T11:49:01.994453

    >>> x = template_utils.StringGenerator(lambda:str(random.random()))
    >>> print(f"{x}")
    0.795273959965055
    >>> print(f"{x}")
    0.053061903749985206
    """

    def __init__(self, func):
        self.func = func

    def __str__(self):
        # Every interpolation re-invokes the wrapped function.
        return self.func()

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)
class FakerTemplateLibrary:
    """A Jinja template library exposing faker.xyz objects to templates."""

    _faker = None

    def __init__(self, locale=None):
        self.locale = locale
        self.faker = Faker(self.locale)

    def __getattr__(self, name):
        # Any unknown attribute becomes a lazy faker formatter.
        def formatter(*args, **kwargs):
            return self.faker.format(name, *args, **kwargs)
        return StringGenerator(formatter)
# Shared default library instance, and a memoizing Template constructor
# so repeated format strings are only compiled once.
faker_template_library = FakerTemplateLibrary()
Template = lru_cache(512)(Template)
def format_str(value, variables=None, fake=faker_template_library):
    """Render *value* as a Jinja template when it looks like one.

    Non-strings, and strings containing no '{', pass through untouched.
    """
    context = variables or {}
    if isinstance(value, str) and "{" in value:
        return Template(value).render(fake=fake, **context)
    return value
| {
"repo_name": "SalesforceFoundation/CumulusCI",
"path": "cumulusci/core/template_utils.py",
"copies": "1",
"size": "1409",
"license": "bsd-3-clause",
"hash": 521737387662944300,
"line_mean": 22.8813559322,
"line_max": 76,
"alpha_frac": 0.6352022711,
"autogenerated": false,
"ratio": 3.622107969151671,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757310240251671,
"avg_score": 0,
"num_lines": 59
} |
from functools import lru_cache
from hashlib import md5
from itertools import product
# Puzzle input salt, and how far ahead to scan for the 5-run confirmation.
salt = b'qzyelonm'
lookahead_distance = 1000
def get_hex_digest(b):
    """Return the hex MD5 digest of the bytes *b*."""
    digest = md5(b)
    return digest.hexdigest()
def first_repeat_char(hash_str, l=3):
    """Return the first character that appears *l* times in a row.

    Returns an empty string when no such run exists. (The original
    returned an empty list here; an empty string keeps the same falsy,
    zero-length-iterable behaviour while making the return type
    consistently str.)
    """
    runs = sorted((hash_str.find(c * l), c) for c in set(hash_str) if c * l in hash_str)
    return runs[0][1] if runs else ''
@lru_cache(maxsize=lookahead_distance)
def hash_bytes(b, n):
    """Append the decimal index *n* to the salt bytes *b*."""
    suffix = str(n).encode()
    return b + suffix
@lru_cache(maxsize=lookahead_distance)
def get_iter_hex_digest(b, iterations=0):
    """MD5-hash *b*, then re-hash the hex digest *iterations* more times."""
    for _ in range(iterations + 1):
        b = get_hex_digest(b).encode()
    return b.decode()
# Scan indices until 64 one-time-pad keys are found (AoC 2016 day 14).
i = 0
keys = []
key_stretch_rounds = 2016  # part 1: 0; part 2: 2016
while len(keys) < 64:
    digest = get_iter_hex_digest(hash_bytes(salt, i), key_stretch_rounds)
    # Index i is a key when its first triple-char also appears five in a
    # row in one of the next 1000 (stretched) hashes.
    if any((index, char) for index, char in product(range(i+1, i+1+lookahead_distance), first_repeat_char(digest))
           if char*5 in get_iter_hex_digest(hash_bytes(salt, index), key_stretch_rounds)):
        print(len(keys), i)
        keys.append(i)
    i += 1
print('\n\n', keys[-1], '\n\n')
| {
"repo_name": "sclarke/adventofcode2016",
"path": "d14.py",
"copies": "1",
"size": "1100",
"license": "bsd-3-clause",
"hash": -853456757248792700,
"line_mean": 25.1904761905,
"line_max": 114,
"alpha_frac": 0.6554545455,
"autogenerated": false,
"ratio": 2.9177718832891246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.906639737057481,
"avg_score": 0.0013658116428629668,
"num_lines": 42
} |
from functools import lru_cache
from hashlib import md5
from time import time
from django.core.cache import cache
from django.utils.cache import _i18n_cache_key_suffix
from mezzanine.conf import settings
from mezzanine.utils.conf import middlewares_or_subclasses_installed
from mezzanine.utils.sites import current_site_id
def _hashed_key(key):
"""
Hash keys when talking directly to the cache API, to avoid
keys longer than the backend supports (eg memcache limit is 255)
"""
return md5(key.encode("utf-8")).hexdigest()
def cache_set(key, value, timeout=None, refreshed=False):
    """
    Wrapper for ``cache.set`` that packs the entry with its intended
    expiry time. When ``cache_get`` retrieves the entry and finds the
    packed expiry has passed, the stale entry is re-stored with
    ``CACHE_SET_DELAY_SECONDS`` added and a miss is returned, so exactly
    one caller regenerates the value while all others keep receiving
    the stale entry - no real cache-miss stampede ever occurs.
    """
    if timeout is None:
        timeout = settings.CACHE_MIDDLEWARE_SECONDS
    refresh_time = time() + timeout
    # Keep the entry alive a little past its logical expiry so stale
    # reads still succeed while one caller refreshes it.
    real_timeout = timeout + settings.CACHE_SET_DELAY_SECONDS
    packed = (value, refresh_time, refreshed)
    return cache.set(_hashed_key(key), packed, real_timeout)
def cache_get(key):
    """
    Wrapper for ``cache.get``. Entries carry their logical expiry time;
    once it has passed, the stale value is stored back (marked as
    refreshed) and None is returned to trigger a fake cache miss so the
    caller regenerates the value.
    """
    packed = cache.get(_hashed_key(key))
    if packed is None:
        return None
    value, refresh_time, refreshed = packed
    stale = time() > refresh_time and not refreshed
    if stale:
        # Re-store briefly so concurrent callers keep getting the stale
        # value while this caller recomputes it.
        cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True)
        return None
    return value
@lru_cache(maxsize=None)
def cache_installed():
    """
    Returns ``True`` if a cache backend is configured, and the
    cache middleware classes or subclasses thereof are present.
    Evaluated once per process via the lru_cache memoization.
    """
    required_middleware = [
        "mezzanine.core.middleware.UpdateCacheMiddleware",
        "mezzanine.core.middleware.FetchFromCacheMiddleware",
    ]
    has_key = bool(getattr(settings, "NEVERCACHE_KEY", ""))
    return (
        has_key
        and settings.CACHES
        and not settings.TESTING
        and middlewares_or_subclasses_installed(required_middleware)
    )
def cache_key_prefix(request):
    """
    Cache key for Mezzanine's cache middleware, namespaced by the
    current site ID.
    """
    # The trailing "default" used to carry the request's device type;
    # device detection was removed in Mezzanine 4.3 but the value was
    # kept so existing cache keys stay valid.
    # See: https://github.com/stephenmcd/mezzanine/pull/1783
    parts = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, current_site_id(), "default")
    cache_key = "{}.{}.{}".format(*parts)
    return _i18n_cache_key_suffix(request, cache_key)
def nevercache_token():
    """
    Secret token delimiting content wrapped in the ``nevercache``
    template tag.
    """
    return "nevercache.{}".format(settings.NEVERCACHE_KEY)
def add_cache_bypass(url):
    """
    Append the current time to *url*'s querystring to force a cache
    reload - used when a form post redirects back to a page that must
    show updated content (new comments, ratings). A no-op when the
    cache is not installed.
    """
    if not cache_installed():
        return url
    fragment = ""
    if "#" in url:
        url, fragment = url.split("#", 1)
        fragment = "#" + fragment
    separator = "&" if "?" in url else "?"
    stamp = str(time()).replace(".", "")
    return "{}{}t={}{}".format(url, separator, stamp, fragment)
| {
"repo_name": "stephenmcd/mezzanine",
"path": "mezzanine/utils/cache.py",
"copies": "2",
"size": "3906",
"license": "bsd-2-clause",
"hash": 6980224498068520000,
"line_mean": 32.1016949153,
"line_max": 74,
"alpha_frac": 0.6592421915,
"autogenerated": false,
"ratio": 3.917753259779338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 118
} |
from functools import lru_cache
from io import BytesIO
from urllib.parse import urljoin
from PIL import Image
from urllib.request import Request, urlopen
import asyncio
from boards.database import engine
from bs4 import BeautifulSoup
import aiohttp
# Pretend to be a desktop Firefox so sites serve their normal content.
user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'
request_headers = {
    'User-Agent': user_agent
}
class HeadRequest(Request):
    """A urllib Request that issues HEAD instead of GET."""

    def get_method(self):
        return 'HEAD'
def get_content_type(url):
    """
    Get the content type of a url without downloading the body.

    :return: lowercase content type of the url
    :rtype: string
    """
    response = urlopen(HeadRequest(url, data=None, headers=request_headers))
    # Header names are case-insensitive, so normalise before lookup.
    headers_lower = {k.lower(): v for k, v in dict(response.info()).items()}
    return headers_lower.get('content-type').lower()
@lru_cache(maxsize=128)
@asyncio.coroutine
def get_image_dimensions(url, referer=None): # modified version of getImageInfo from bfg-pages
    """
    Get dimensions from a partial image download from a url

    NOTE(review): lru_cache wraps the coroutine function, so a repeat
    call with the same url returns the SAME coroutine object, which can
    only be awaited once - confirm duplicate urls never reach this
    within one event-loop run. asyncio.coroutine is also removed in
    Python 3.11.

    :param url: URL of the image
    :return: (url, width, height)
    :rtype: (string, number, number)
    """
    headers = {
        'User-Agent': user_agent,
        'Range': 'bytes=0-1024'  # only the first KB is needed for the size
    }
    if referer:
        headers['Referer'] = referer
    try:
        response = yield from aiohttp.request('get', url, headers=headers)
        data = yield from response.content.read(1024)
        response.close()
    except (OSError, ValueError): # Couldn't read
        return
    if not data:
        return
    try:
        # PIL can usually determine dimensions from the partial header.
        image = Image.open(BytesIO(data))
        if image:
            return url, image.size[0], image.size[1]
    except Exception as e:
        print(str(e) + ' at ' + url)
        return
def is_square(dimensions, max_ratio=1.0):
    """
    :param dimensions: (width, height)
    :type dimensions: (number, number)
    :param max_ratio: Maximum aspect ratio.
    :return: True if the dimensions represent a square
    """
    width, height = dimensions[0], dimensions[1]
    # Degenerate sizes can never be square (and would divide by zero).
    if not width or not height:
        return False
    aspect = width / height
    return 1 / max_ratio <= aspect <= max_ratio
def find_largest_square_image(url, max_ratio=1.0, min_area=0):
    """
    Find the largest square image on the page

    :param url: URL to an HTML page
    :param max_ratio: Maximum aspect ratio. Must be greater than 1
    :param min_area: minimum pixel area (w*h) a candidate must exceed
    :return: URL of the largest image, None if no square images exist
    """
    request = Request(url, headers=request_headers)
    html = urlopen(request).read()
    soup = BeautifulSoup(html, 'html.parser')
    images = soup.find_all('img', src=True)
    # Resolve relative src attributes against the page url.
    urls = [urljoin(url, img['src']) for img in images]
    biggest_src = None
    biggest_area = 0
    # Fetch all candidate dimensions concurrently.
    coroutines = [get_image_dimensions(img_url) for img_url in urls]
    results = asyncio.get_event_loop().run_until_complete(asyncio.gather(*coroutines))
    for result in results:
        if result is not None:
            img_url, width, height = result
            if is_square((width, height), max_ratio=max_ratio):
                area = width*height
                if area > biggest_area and area > min_area:
                    biggest_area = area
                    biggest_src = img_url
    return biggest_src
def create_thumbnail(url, post_id):
    """
    Creates a thumbnail for a post.

    For direct image links the image itself is used; for HTML pages the
    largest square-ish image on the page is chosen. The thumbnail is
    written to disk and its path stored on the post row; returns None.
    """
    content_type = get_content_type(url)
    if 'image/' in content_type:
        try:
            request = Request(url, data=None, headers=request_headers)
            img_bytes = BytesIO(urlopen(request).read())
            og_image = Image.open(img_bytes)
        except OSError: # Usually happens when link wasn't an image
            return
    elif 'text/html' in content_type:
        # Pick the most square-ish, sufficiently large image on the page.
        url = find_largest_square_image(url, max_ratio=2, min_area=2000)
        request = Request(url, data=None, headers=request_headers)
        img_bytes = BytesIO(urlopen(request).read())
        og_image = Image.open(img_bytes)
    else:
        return
    og_image.thumbnail((128, 128))
    path = 'static/images/thumbnails/%d.jpg' % post_id
    og_image.save('boards/' + path, 'JPEG', quality=85)
    # NOTE(review): SQL built via %-formatting; post_id is an int and
    # path is constructed locally, but a parameterized query would be
    # safer here.
    engine.execute('UPDATE posts SET thumbnail=\'%s\' WHERE id=%d' % ('/' + path, post_id))
| {
"repo_name": "mstojcevich/Boards",
"path": "boards/thumbnail.py",
"copies": "1",
"size": "4332",
"license": "mit",
"hash": -1695060703479431400,
"line_mean": 31.0888888889,
"line_max": 95,
"alpha_frac": 0.6350415512,
"autogenerated": false,
"ratio": 3.7025641025641027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48376056537641027,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from io import BytesIO
import pygame
import requests
class ChatImage:
    """Fetches chat images from Twitch and scales them to a fixed height."""

    def __init__(self, client_id, height=16):
        self.session = requests.Session()
        self.session.headers["Client-ID"] = client_id
        self.session.headers["Accept"] = "application/vnd.twitchtv.v5+json"
        self.img_height = height

    def load_from_url(self, url):
        """Download *url* and return a pygame surface scaled to img_height."""
        payload = BytesIO(self.session.get(url).content)
        surface = pygame.image.load(payload)
        scale = self.img_height / float(surface.get_height())
        target_size = (int(surface.get_width() * scale), self.img_height)
        scaled = pygame.transform.scale(surface, target_size)
        # convert_alpha() requires an initialized display.
        if pygame.display.get_init():
            return scaled.convert_alpha()
        return scaled
class TwitchEmotes(ChatImage):
    """Fetches and caches Twitch emote images by emote id."""

    def __init__(self, client_id, height):
        super().__init__(client_id=client_id, height=height)
        result = self.session.get("https://api.twitch.tv/kraken/chat/emoticons").json()
        self.emote_map = {str(e["id"]): e["images"]["url"] for e in result["emoticons"]}
        # Per-instance cache: lru_cache on a method keys on `self` and
        # keeps every instance alive for the cache's lifetime (B019),
        # so cache surfaces here instead.
        self._surface_cache = {}

    def get(self, code):
        """Return the (cached) surface for emote *code*, or None if unknown."""
        if code in self._surface_cache:
            return self._surface_cache[code]
        url = self.emote_map.get(code)
        if url is None:
            return None
        surface = self.load_from_url(url)
        self._surface_cache[code] = surface
        return surface
class TwitchBadges(ChatImage):
    """Fetches and caches Twitch badge images per channel."""

    def __init__(self, client_id, height):
        super().__init__(client_id=client_id, height=height)
        self.global_badges = self.session.get(
            "https://badges.twitch.tv/v1/badges/global/display?language=en"
        ).json()["badge_sets"]
        self.badge_map = {}
        # Per-instance cache instead of lru_cache on the method, which
        # would key on `self` and pin instances in memory (B019).
        self._surface_cache = {}

    def _get_channel_badges(self, channel_id):
        """Fetch (and memoize) the badge map for *channel_id*."""
        # The original's if/else branches were identical; one
        # lookup-or-fetch is equivalent.
        if channel_id not in self.badge_map:
            self.badge_map[channel_id] = self.session.get(
                f"https://api.twitch.tv/kraken/chat/{channel_id}/badges"
            ).json()
        return self.badge_map[channel_id]

    def get(self, channel_id, badge_type):
        """Return the surface for *badge_type* ("name/version") in a channel."""
        key = (channel_id, badge_type)
        if key in self._surface_cache:
            return self._surface_cache[key]
        badge_map = self._get_channel_badges(channel_id)
        badge, version = badge_type.split("/")
        if badge_map.get(badge):
            url = badge_map[badge].get("image")
        else:
            # Fall back to the global badge set.
            url = self.global_badges[badge]["versions"][version]["image_url_4x"]
        surface = self.load_from_url(url)
        self._surface_cache[key] = surface
        return surface
| {
"repo_name": "shughes-uk/twitchchat_display",
"path": "twitchchat_display/images.py",
"copies": "1",
"size": "2426",
"license": "mit",
"hash": -507086209099794400,
"line_mean": 35.2089552239,
"line_max": 88,
"alpha_frac": 0.604286892,
"autogenerated": false,
"ratio": 3.4025245441795233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9504267119441211,
"avg_score": 0.0005088633476623785,
"num_lines": 67
} |
from functools import lru_cache
from io import TextIOWrapper
from typing import Dict
import pytz
@lru_cache(maxsize=None)
def get_canonical_timezone_map() -> Dict[str, str]:
    """Map timezone aliases to canonical names, parsed from tzdata.zi."""
    alias_to_name: Dict[str, str] = {}
    with TextIOWrapper(
        pytz.open_resource("tzdata.zi")  # type: ignore[attr-defined] # Unclear if this is part of the public pytz API
    ) as f:
        for line in f:
            # Link lines have the form "L <canonical-name> <alias>".
            if not line.startswith("L "):
                continue
            _, name, alias = line.split()
            alias_to_name[alias] = name
    return alias_to_name
def canonicalize_timezone(key: str) -> str:
    """Return the canonical name for *key*, or *key* itself if unknown."""
    mapping = get_canonical_timezone_map()
    return mapping.get(key, key)
# Note: some of these abbreviations are fundamentally ambiguous (see
# zerver/tests/test_timezone.py), so you should never rely on them as
# anything more than a heuristic.
# Abbreviation -> UTC offset in seconds east of UTC (e.g. EST = -18000 = UTC-5).
common_timezones = {
    "SST": -39600,
    "HST": -36000,
    "AKST": -32400,
    "HDT": -32400,
    "AKDT": -28800,
    "PST": -28800,
    "MST": -25200,
    "PDT": -25200,
    "CST": -21600,
    "MDT": -21600,
    "CDT": -18000,
    "EST": -18000,
    "AST": -14400,
    "EDT": -14400,
    "NST": -12600,
    "ADT": -10800,
    "NDT": -9000,
    "GMT": 0,
    "UTC": 0,
    "WET": 0,
    "BST": +3600,
    "CET": +3600,
    "MET": +3600,
    "WAT": +3600,
    "WEST": +3600,
    "CAT": +7200,
    "CEST": +7200,
    "EET": +7200,
    "MEST": +7200,
    "SAST": +7200,
    "EAT": +10800,
    "EEST": +10800,
    "IDT": +10800,
    "MSK": +10800,
    "PKT": +18000,
    "IST": +19800,
    "WIB": +25200,
    "AWST": +28800,
    "HKT": +28800,
    "WITA": +28800,
    "JST": +32400,
    "KST": +32400,
    "WIT": +32400,
    "ACST": +34200,
    "AEST": +36000,
    "ChST": +36000,
    "ACDT": +37800,
    "AEDT": +39600,
    "NZST": +43200,
    "NZDT": +46800,
}
| {
"repo_name": "zulip/zulip",
"path": "zerver/lib/timezone.py",
"copies": "6",
"size": "1757",
"license": "apache-2.0",
"hash": -4953415391975245000,
"line_mean": 21.2405063291,
"line_max": 118,
"alpha_frac": 0.5350028458,
"autogenerated": false,
"ratio": 2.6865443425076454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010637166258908626,
"num_lines": 79
} |
from functools import lru_cache
from itertools import chain
import logging
import os
import sublime
from . import events, persist, util
logger = logging.getLogger(__name__)
# Whether gutter icons are tinted; updated by read_gutter_theme().
COLORIZE = True
WHITE_SCOPE = 'region.whitish'  # hopefully a white color
DEFAULT_STYLES = None  # holds the styles we ship as the default settings
@events.on('plugin_loaded')
def on_plugin_loaded():
    # Load the configured gutter theme as soon as the plugin is ready.
    read_gutter_theme()
@events.on('settings_changed')
def on_settings_changed(settings, **kwargs):
    # Style lookups are memoized, so drop them whenever settings change.
    clear_caches()
    if settings.has_changed('gutter_theme'):
        read_gutter_theme()
def read_gutter_theme():
    """Locate the configured gutter theme and update COLORIZE from it."""
    global COLORIZE
    COLORIZE = True
    theme_path = persist.settings.get('gutter_theme')
    theme_file = os.path.basename(theme_path)
    if not theme_file.endswith(".gutter-theme"):
        theme_file += ".gutter-theme"
    found = sublime.find_resources(theme_file)
    if not found:
        return
    opts = util.load_json(found[0])
    if opts:
        COLORIZE = opts.get("colorize", True)
def clear_caches():
    # Invalidate the memoized style and icon lookups.
    get_value_.cache_clear()
    get_icon_.cache_clear()
def get_value(key, error, default=None):
    """Look up the style setting *key* for a lint *error* dict."""
    return get_value_(
        key, error['linter'], error['code'], error['error_type'], default)
@lru_cache(maxsize=128)
def get_value_(key, linter, code, error_type, default):
    """Resolve style setting *key* with the precedence:

    1. linter styles matching the exact error code,
    2. linter styles matching the error type,
    3. global styles, then the shipped default styles,
    falling back to *default* when nothing matches.
    """
    linter_styles = persist.settings.get('linters', {}).get(linter, {}).get('styles', [])
    global_styles = persist.settings.get('styles', [])
    for style_definition in linter_styles:
        if code in style_definition.get('codes', []):
            try:
                return style_definition[key]
            except KeyError:
                ...
    for style_definition in linter_styles:
        # For linter_styles, do not auto fill 'types' if the user already
        # provided 'codes'
        # Bugfix: this loop used to rebind the `default` parameter, so
        # the final `return default` could return a list instead of the
        # caller's fallback whenever linter styles were configured.
        fallback_types = [] if 'codes' in style_definition else [error_type]
        if error_type in style_definition.get('types', fallback_types):
            try:
                return style_definition[key]
            except KeyError:
                ...
    default_styles = get_default_styles()
    for style_definition in chain(global_styles, default_styles):
        if error_type in style_definition.get('types', [error_type]):
            try:
                return style_definition[key]
            except KeyError:
                ...
    return default
def get_default_styles():
    """Yield the style definitions shipped in the default settings file."""
    # Using `yield from` to load the defaults on first usage, possibly never.
    global DEFAULT_STYLES
    if DEFAULT_STYLES is None:
        try:
            defaults = util.load_json(
                'SublimeLinter.sublime-settings', from_sl_dir=True)
        except Exception:
            logger.warning("Could not load the default styles")
            DEFAULT_STYLES = []  # don't retry on every call
        else:
            DEFAULT_STYLES = defaults.get('styles', [])
    yield from DEFAULT_STYLES
def get_icon(error):
    # type: (persist.LintError) -> str
    """Return the gutter icon (name or resource path) for a lint error."""
    linter, code, error_type = error['linter'], error['code'], error['error_type']
    return get_icon_(linter, code, error_type)
@lru_cache(maxsize=16)
def get_icon_(linter, code, error_type):
    # type: (persist.LinterName, str, str) -> str
    """Resolve the configured icon to something Sublime can render."""
    icon = get_value_('icon', linter, code, error_type, 'none')

    if icon in ('circle', 'dot', 'bookmark', 'none'):  # Sublime Text has some default icons
        return icon
    elif icon != os.path.basename(icon):
        # Already a path into the packages tree; pass through untouched.
        return icon
    elif persist.settings.get('gutter_theme').endswith('.gutter-theme'):
        # Theme given as an explicit file: icons live next to that file.
        theme_path = os.path.dirname(persist.settings.get('gutter_theme'))
        if not icon.endswith('.png'):
            icon += '.png'
        return '{}/{}'.format(theme_path, icon)
    else:
        # Theme given by name: look in our bundled gutter-themes folder.
        theme = persist.settings.get('gutter_theme')
        if not icon.endswith('.png'):
            icon += '.png'
        return 'Packages/SublimeLinter/gutter-themes/{}/{}'.format(theme, icon)
def get_icon_scope(error):
    # type: (persist.LintError) -> str
    """Return the color scope for an icon; white when colorizing is off."""
    if COLORIZE:
        return get_value('scope', error)
    else:
        return WHITE_SCOPE
| {
"repo_name": "SublimeLinter/SublimeLinter3",
"path": "lint/style.py",
"copies": "1",
"size": "4137",
"license": "mit",
"hash": 7030456014647995000,
"line_mean": 28.9782608696,
"line_max": 92,
"alpha_frac": 0.6183224559,
"autogenerated": false,
"ratio": 3.7303877366997296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48487101925997295,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from json import JSONDecodeError
from urllib.parse import urljoin, urlparse
import requests
from django.conf import settings
from django.db.models import Q
from workshops.models import (
Person,
Role,
Organization,
Sponsorship,
Task,
)
from workshops.util import create_username
class BaseAPIClient(requests.Session):
    """
    An API client that abstracts away the work of dealing with URLs.

    Usage:
    > client = APIClient(event)
    > list(client) -> returns a list of all objects returned by the API.
    > client[23] -> returns the object with pk=23

    Subclasses must define ENDPOINT (relative URL), `model` (the Django
    model being produced) and `parse(obj)`.
    """
    ROOT_ENDPOINT = 'api/'

    # NOTE(review): lru_cache on __new__ makes construction act like a
    # per-event singleton and keeps each event's client alive for the
    # process lifetime — presumably intentional; confirm before changing.
    @lru_cache(maxsize=None)
    def __new__(cls, event):
        """
        Returns an instance of APIClient.
        Throws NotImplementedError if an API does not exist at the root URL.
        """
        try:
            r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))
            r.raise_for_status()
            r.json()  # probe: the root endpoint must serve valid JSON
        except (requests.exceptions.HTTPError, JSONDecodeError):
            raise NotImplementedError('Conference site does not support an API')
        return super().__new__(cls)

    def __init__(self, event):
        '''Populate API endpoint and set up basic authentication'''
        super().__init__()
        self.event = event
        self.endpoint = urljoin(event.url, self.ENDPOINT)
        self.auth = (
            settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)

    def __iter__(self):
        """Yield parsed model instances for every object the API returns."""
        try:
            r = self.get(self.endpoint)
            r.raise_for_status()
            pydata_objs = r.json()
        except (requests.exceptions.HTTPError, JSONDecodeError) as e:
            raise IOError('Cannot fetch instances from API: {}'.format(str(e)))
        for obj in pydata_objs:
            yield self.parse(obj)

    def __contains__(self, pk):
        """True when the API serves an object under this primary key."""
        try:
            self.get(self.endpoint + str(pk)).raise_for_status()
        except requests.exceptions.HTTPError:
            return False
        else:
            return True

    def __getitem__(self, pk):
        """Fetch and parse a single object; KeyError when it doesn't exist."""
        if pk not in self:
            raise KeyError(
                '{} does not exist'.format(self.model._meta.verbose_name)
            )
        obj = self.get(self.endpoint + str(pk)).json()
        return self.parse(obj)
class PersonAPIClient(BaseAPIClient):
    """Client for the conference speaker endpoint."""

    ENDPOINT = 'api/speaker/'
    model = Person

    def parse(self, speaker):
        """Turn a speaker payload into an unsaved Person instance."""
        full_name = speaker['name'].strip()
        speaker['name'] = full_name
        # Everything before the last space is the personal name; the last
        # word is the family name (a single word serves as both).
        name_parts = full_name.rsplit(' ', 1)
        return Person(
            username=speaker['username'],
            personal=name_parts[0],
            family=name_parts[-1],
            email=speaker['email'],
            url=speaker['absolute_url'],
        )
class TaskAPIClient(BaseAPIClient):
    """Client for the presentation endpoint; yields presenter Tasks."""

    ENDPOINT = 'api/presentation/'
    model = Task

    def parse(self, presentation):
        """Build an unsaved Task, creating the speaker Person if needed."""
        return Task(
            event=self.event,
            # get_or_create keys on email; the defaults mirror
            # PersonAPIClient.parse's name handling.
            person=Person.objects.get_or_create(
                email=presentation['speaker']['email'],
                defaults={
                    'username': create_username('', presentation['speaker']['username']),
                    'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],
                    'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],
                    'url': presentation['speaker']['absolute_url'],
                }
            )[0],
            role=Role.objects.get(name='presenter'),
            title=presentation['title'],
            url=presentation['absolute_url'],
        )
class SponsorshipAPIClient(BaseAPIClient):
    """Client for the sponsor endpoint; yields Sponsorship instances."""

    ENDPOINT = 'api/sponsor/'
    model = Sponsorship

    def parse(self, sponsor):
        """Build an unsaved Sponsorship, matching or creating the org."""
        domain = urlparse(sponsor['external_url']).netloc
        # Match an existing organization by full name OR domain.
        organization = Organization.objects.filter(
            Q(fullname=sponsor['name']) | Q(domain=domain)
        ).first()
        if not organization:
            organization = Organization.objects.create(
                fullname=sponsor['name'],
                domain=domain,
                notes=sponsor['annotation'],
            )
        return Sponsorship(
            organization=organization,
            event=self.event,
            amount=sponsor['level']['cost'],
            contact=Person.objects.get_or_create(
                email=sponsor['contact_email'],
                defaults={
                    'username': create_username('', sponsor['contact_name']),
                    'personal': sponsor['contact_name'].rsplit(' ', 1)[0],
                    'family': sponsor['contact_name'].rsplit(' ', 1)[-1],
                },
            )[0],
        )
| {
"repo_name": "vahtras/amy",
"path": "pydata/api.py",
"copies": "1",
"size": "4704",
"license": "mit",
"hash": 2670075810124097000,
"line_mean": 31.6666666667,
"line_max": 89,
"alpha_frac": 0.5612244898,
"autogenerated": false,
"ratio": 4.363636363636363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033125072809556,
"num_lines": 144
} |
from functools import lru_cache
from langcodes import Language, best_match
# Text in scripts written without spaces has to be handled specially in our
# tokenization regex (see TOKEN_RE in tokens.py). Also, when one of these is
# the script of the language we're analyzing, then we need to either have
# a specific tokenizer for the language or give up.
# BCP 47 script codes whose text is written without word-separating spaces.
SPACELESS_SCRIPTS = [
    # Han ideographs are spaceless, but they don't need to appear in this list
    # because _almost_ all of them, except for some exceptional Japanese
    # characters, are covered by the \p{IsIdeo} check. Checking for
    # Script=Hani and IsIdeo slows down our regexes with huge, redundant
    # classes of characters. Instead, we'll list the exceptions below.
    'Hira',  # Hiragana
    'Kana',  # Katakana
    'Thai',  # Thai script
    'Khmr',  # Khmer script
    'Laoo',  # Lao script
    'Mymr',  # Burmese script
    'Tale',  # Tai Le script
    'Talu',  # Tai Lü script
    'Lana',  # Lanna script
]

EXTRA_JAPANESE_CHARACTERS = 'ー々〻〆'
# ー is a lengthening mark that's both hiragana and katakana. Unicode
# segmentation handles it as a special case, but we're overriding standard
# Unicode segmentation, so we need to have the special case too.
#
# 々 and 〻 are "iteration marks" that stand for the previous kanji. So they
# act identically to kanji (ideograms) without technically _being_ kanji. That
# technicality doesn't matter to us.
#
# 〆 is a Japanese abbreviation for "total", and even this can be used in the
# middle of words. Why isn't it just considered an ideograph? I don't know, I
# didn't come up with this language, or Unicode for that matter.
#
# None of this even comes up when we're trying to tokenize Chinese and
# Japanese. It comes up when we're trying to _not_ tokenize a word because
# it's Chinese or Japanese and the tokenization doesn't really matter, which
# happens in ConceptNet.
def _language_in_list(language, targets, min_score=80):
    """
    A helper function to determine whether this language matches one of the
    target languages, with a match score above a certain threshold.

    The languages can be given as strings (language tags) or as Language
    objects. `targets` can be any iterable of such languages.
    """
    matched = best_match(language, targets, min_score=min_score)
    # best_match returns a (language, score) pair; the score is 0 when no
    # target reached min_score, so any positive score means "matched".
    return matched[1] > 0
@lru_cache(maxsize=None)
def get_language_info(language):
    """
    Looks up the things we need to know about how to handle text in a given
    language. This will return a dictionary with the following fields:

    'script': a BCP 47 script code such as 'Latn', 'Cyrl', 'Hans'...
        Indicates the script that tokens in this language should be in,
        _after_ our preprocessing. The script for 'zh' is 'Hans', for example,
        because even if the input is in Traditional Chinese ('Hant'), we
        convert it to Simplified.

    'tokenizer': 'regex', 'jieba', 'mecab', or None
        Indicates the best way we know to separate tokens in the language.
        'regex' is what will be used for most languages, meaning that we can
        segment the text with a Unicode-aware regular expression. If a language
        generally uses spaces to separate words, the regex will work well.
        'jieba' and 'mecab' are tokenizers for specific languages written
        without spaces.
        A tokenizer of None means we don't have a good way to segment the
        language. We'll use the regex anyway, but the results will be pretty
        bad.

    'normal_form': 'NFC' or 'NFKC'
        How "should" Unicode be normalized when comparing text in this
        language? This is not a standard, it's just based on experience.
        Many languages need NFKC normalization for text comparisons to work
        properly, but in many European languages, NFKC normalization is
        excessive and loses information.

    'remove_marks': True or False
        Determines whether marks and decorations, such as vowel points and
        tatweels, should be removed. True for languages in abjad scripts.

    'dotless_i': True or False
        Is "ı" the lowercase of "I" in this language, as in Turkish?

    'diacritics_under': 'cedillas', 'commas', or None
        Should we convert any diacritics that are under the letters "s" and
        "t" in this language? 'cedillas' means we should convert commas to
        cedillas, and 'commas' means we should convert cedillas to commas.

    'transliteration': 'sr-Latn', 'az-Latn', or None
        Indicates a type of transliteration that we should use for normalizing
        a multi-script language. 'sr-Latn' means to use Serbian romanization,
        and 'az-Latn' means to use Azerbaijani romanization.

    'lookup_transliteration': 'zh-Hans' or None
        Indicates a lossy transliteration that should be not be used for output,
        but should be applied when looking up words in a list. 'zh-Hans' means
        that we should convert Traditional Chinese characters to Simplified.
    """
    # The input is probably a string, so parse it into a Language. If it's
    # already a Language, it will pass through.
    language = Language.get(language)

    # Assume additional things about the language, such as what script it's in,
    # using the "likely subtags" table
    language_full = language.maximize()

    # Start the `info` dictionary with default values, including the 'script'
    # value that we now know from `language_full`.
    info = {
        'script': language_full.script,
        'tokenizer': 'regex',
        'normal_form': 'NFKC',
        'remove_marks': False,
        'dotless_i': False,
        'diacritics_under': None,
        'transliteration': None,
        'lookup_transliteration': None
    }

    # Pick a language-specific tokenizer before falling back on the script.
    if _language_in_list(language, ['ja', 'ko']):
        info['tokenizer'] = 'mecab'
    elif _language_in_list(language, ['zh', 'yue']):
        info['tokenizer'] = 'jieba'
    elif info['script'] in SPACELESS_SCRIPTS:
        info['tokenizer'] = None

    # Cased alphabetic scripts get NFC normal form
    if info['script'] in ['Latn', 'Grek', 'Cyrl']:
        info['normal_form'] = 'NFC'

    # Abjads: strip vowel points and similar marks before comparison.
    if info['script'] in ['Arab', 'Hebr']:
        info['remove_marks'] = True

    if _language_in_list(language, ['tr', 'az', 'kk']):
        info['dotless_i'] = True
        info['diacritics_under'] = 'cedillas'
    elif _language_in_list(language, ['ro']):
        info['diacritics_under'] = 'commas'

    if _language_in_list(language, ['sr']):
        info['transliteration'] = 'sr-Latn'
    elif _language_in_list(language, ['az']):
        info['transliteration'] = 'az-Latn'

    # Traditional Chinese is looked up via its Simplified form.
    if language.language == 'zh' and language.script != 'Hant':
        info['lookup_transliteration'] = 'zh-Hans'

    return info
| {
"repo_name": "LuminosoInsight/wordfreq",
"path": "wordfreq/language_info.py",
"copies": "1",
"size": "6793",
"license": "mit",
"hash": 1412795037747228000,
"line_mean": 38.3895348837,
"line_max": 80,
"alpha_frac": 0.6748339483,
"autogenerated": false,
"ratio": 3.7266226622662266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49014566105662266,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from logging import getLogger
from os.path import join
from typing import Any, Callable, Dict, Optional, List, Sequence, Set, Tuple
import requests
import json
import csv
from ..const import PROPER_STOP_NAMES, ACTIVE_RAIL_STATIONS, HEADERS, \
GIST_MISSING_STOPS, GIST_RAIL_PLATFORMS
from ..parser.dataobj import ZTMStop, ZTMStopGroup
"""
Module reposible for handling handling stop data.
Converts ZTM group-stake hierarchy to GTFS representations of real-life structures.
Fills missing data from external gists (see ../const.py); manages which stops to export,
and all that kind of jazz.
"""
def normalize_stop_name(name: str) -> str:
    """Attempts to fix stop names provided by ZTM"""
    # add .title() if ZTM provides names in ALL-UPPER CASE again
    # Ordered substring fixes: spacing around punctuation first, then
    # capitalization of common abbreviations and words.
    fixes = (
        (".", ". "),
        ("-", " - "),
        ("  ", " "),
        ("al.", "Al."),
        ("pl.", "Pl."),
        ("os.", "Os."),
        ("ks.", "Ks."),
        ("Ak ", "AK "),
        ("Ch ", "CH "),
        ("gen.", "Gen."),
        ("rondo ", "Rondo "),
    )
    for old, new in fixes:
        name = name.replace(old, new)
    return name.rstrip()
def should_town_be_added_to_name(group: ZTMStopGroup) -> bool:
    """Checks whether town name should be added to the stop name"""
    town = group.town.casefold()
    name = group.name.casefold()

    # Guard clauses: any of these means the town must NOT be prepended.
    if group.town_code == "--":
        return False  # Stops in Warsaw
    if group.id[1:3] in {"90", "91", "92"}:
        return False  # Railway stations
    if "PKP" in group.name or "WKD" in group.name:
        return False  # Stops near train / WKD stations
    if town in name:
        return False  # Town name is already in stop name
    if any(part in name for part in town.split(" ")):
        return False  # Any part of town name is already in the stop name

    return True
def avg_position(stops: "Sequence[ZTMStop]") -> Optional[Tuple[float, float]]:
    """Return the mean (lat, lon) over stops that have coordinates.

    Stops with an undefined position are ignored. Returns None when no
    stop has usable coordinates (or the sequence is empty).
    """
    # BUG FIX: the original summed only stops with coordinates but divided
    # by len(stops), skewing the mean whenever some stops lacked a position;
    # it also filtered latitudes by `lon is not None` only.
    positions = [(i.lat, i.lon) for i in stops
                 if i.lat is not None and i.lon is not None]
    if not positions:
        return None
    count = len(positions)
    return (sum(lat for lat, _ in positions) / count,
            sum(lon for _, lon in positions) / count)
@lru_cache(maxsize=None)
def get_missing_stops() -> Dict[str, Tuple[float, float]]:
    """Gets positions of stops from external gist, as ZTM sometimes omits stop coordinates"""
    # Cached for the process lifetime — the gist is fetched at most once.
    with requests.get(GIST_MISSING_STOPS) as req:
        req.raise_for_status()
        return req.json()
@lru_cache(maxsize=None)
def get_rail_platforms() -> Dict[str, Dict[str, Any]]:
    """Gets info about railway stations from external gist"""
    # Cached for the process lifetime — the gist is fetched at most once.
    with requests.get(GIST_RAIL_PLATFORMS) as req:
        req.raise_for_status()
        return req.json()
class StopHandler:
    """Accumulates ZTM stop data and exports it as GTFS stops.txt.

    Converts the ZTM group/stake hierarchy into GTFS stops and stations,
    patching missing coordinates and railway platform info from external
    gists, and tracking which stops were actually referenced.
    """

    def __init__(self, version: str) -> None:
        self.logger = getLogger(f"WarsawGTFS.{version}.StopHandler")
        # Stop data
        self.names = PROPER_STOP_NAMES.copy()
        self.data: Dict[str, Dict[str, Any]] = {}
        self.parents: Dict[str, str] = {}
        self.zones: Dict[str, str] = {}
        # Invalid stop data
        self.invalid: Dict[str, "ZTMStop"] = {}
        self.change: Dict[str, Optional[str]] = {}
        # Used stops
        self.used_invalid: Set[str] = set()
        self.used: Set[str] = set()
        # External data
        self.missing_stops: Dict[str, Tuple[float, float]] = {}
        self.rail_platforms: Dict[str, Dict[str, Any]] = {}
        self._load_external()

    def _load_external(self) -> None:
        """Loads data from external gists"""
        self.logger.info("Loading data from external gists")
        self.missing_stops = get_missing_stops()
        self.rail_platforms = get_rail_platforms()

    @staticmethod
    def _match_virtual(virt: "ZTMStop", stakes: "Sequence[ZTMStop]") -> Optional[str]:
        """Try to find a normal stake corresponding to given virtual stake"""
        # Find normal stakes with matching position
        if virt.lat is not None and virt.lon is not None:
            with_same_pos = [i.id for i in stakes if i.code[0] != "8"
                             and i.lat == virt.lat and i.lon == virt.lon]
        else:
            with_same_pos = []

        # Find normal stakes with matching code
        with_same_code = [i.id for i in stakes if i.code[0] != "8"
                          and i.code[1] == virt.code[1]]

        # Special Case: Metro Młociny 88 → Metro Młociny 28
        if virt.id == "605988" and "605928" in with_same_code:
            return "605928"

        # Matched stakes with the same position
        if with_same_pos:
            return with_same_pos[0]
        # Matched stakes with the same code
        elif with_same_code:
            return with_same_code[0]
        # Unable to find a match
        else:
            return None

    def _find_missing_positions(self, stops: 'List["ZTMStop"]') -> None:
        """Matches data from missing_stops to a list of loaded ZTMStops."""
        for idx, stop in enumerate(stops):
            if stop.lat is None or stop.lon is None:
                missing_pos = self.missing_stops.get(stop.id)
                if missing_pos:
                    stops[idx].lat, stops[idx].lon = missing_pos

    def _load_normal_group(self, group_name: str, stops: 'List["ZTMStop"]'):
        """Saves info about normal stop group"""
        for stop in stops:
            # Fix virtual stops (stake codes starting with "8")
            if stop.code[0] == "8":
                change_to = self._match_virtual(stop, stops)
                if change_to is not None:
                    self.change[stop.id] = change_to
                else:
                    self.invalid[stop.id] = stop
                continue

            # Handle undefined stop positions
            if stop.lat is None or stop.lon is None:
                self.invalid[stop.id] = stop
                continue

            # Save stake into self.data
            self.data[stop.id] = {
                "stop_id": stop.id,
                "stop_name": group_name + " " + stop.code,
                "stop_lat": stop.lat,
                "stop_lon": stop.lon,
                "wheelchair_boarding": stop.wheelchair,
            }

    def _load_railway_group(self, group_id: str, group_name: str,
                            virt_stops: 'List["ZTMStop"]'):
        """Saves data about a stop group representing a railway station"""
        # Drop inactive KM & WKD stations
        if group_id not in ACTIVE_RAIL_STATIONS:
            for i in virt_stops:
                self.change[i.id] = None
            return

        # Load station info
        station_data = self.rail_platforms.get(group_id, {})

        # If this station is not in rail_platforms, average all stake positions
        # In order to calculate an approx. position of the station
        if not station_data:
            avg_pos = avg_position(virt_stops)
            if avg_pos:
                station_lat, station_lon = avg_pos
            # Halt processing if we have no geographical data
            else:
                for i in virt_stops:
                    self.change[i.id] = None
                return
        # Otherwise get the position from rail_platforms data
        else:
            station_lat, station_lon = map(float, station_data["pos"].split(","))
            group_name = station_data["name"]

        # Map every stake into one node
        if (not station_data) or station_data["oneplatform"]:
            self.data[group_id] = {
                "stop_id": group_id,
                "stop_name": group_name,
                "stop_lat": station_lat,
                "stop_lon": station_lon,
                "zone_id": station_data.get("zone_id", ""),
                "stop_IBNR": station_data.get("ibnr_code", ""),
                "stop_PKPPLK": station_data.get("pkpplk_code", ""),
                "wheelchair_boarding": station_data.get("wheelchair", "0"),
            }
            for i in virt_stops:
                self.change[i.id] = group_id

        # Process multi-platform station
        else:
            # Add hub entry
            self.data[group_id] = {
                "stop_id": group_id,
                "stop_name": group_name,
                "stop_lat": station_lat,
                "stop_lon": station_lon,
                "location_type": "1",
                "parent_station": "",
                "zone_id": station_data.get("zone_id", ""),
                "stop_IBNR": station_data.get("ibnr_code", ""),
                "stop_PKPPLK": station_data.get("pkpplk_code", ""),
                "wheelchair_boarding": station_data.get("wheelchair", "0"),
            }

            # Platforms
            for platform_id, platform_pos in station_data["platforms"].items():
                platform_lat, platform_lon = map(float, platform_pos.split(","))
                platform_code = platform_id.split("p")[1]
                platform_name = f"{group_name} peron {platform_code}"

                # Add platform entry
                self.data[platform_id] = {
                    "stop_id": platform_id,
                    "stop_name": platform_name,
                    "stop_lat": platform_lat,
                    "stop_lon": platform_lon,
                    "location_type": "0",
                    "parent_station": group_id,
                    "zone_id": station_data.get("zone_id", ""),
                    "stop_IBNR": station_data.get("ibnr_code", ""),
                    "stop_PKPPLK": station_data.get("pkpplk_code", ""),
                    "wheelchair_boarding": station_data.get("wheelchair", "0"),
                }

                # Add to self.parents
                self.parents[platform_id] = group_id

            # Stops → Platforms
            for stop in virt_stops:
                # Defined stake in rail_platforms
                if stop.id in station_data["stops"]:
                    self.change[stop.id] = station_data["stops"][stop.id]
                # Unknown stake
                elif stop.id not in {"491303", "491304"}:
                    # warn() is a deprecated alias of warning()
                    self.logger.warning(
                        f"No platform defined for railway PR entry {group_name} {stop.id}"
                    )

    def load_group(self, group: "ZTMStopGroup", stops: 'List["ZTMStop"]') -> None:
        """Loads info about stops of a specific group"""
        # Fix name "Kampinoski Pn" town name
        if group.town == "Kampinoski Pn":
            group.town = "Kampinoski PN"

        # Fix group name
        group.name = normalize_stop_name(group.name)

        # Add town name to stop name & save name to self.names
        if (fixed_name := self.names.get(group.id)):
            group.name = fixed_name
        elif should_town_be_added_to_name(group):
            group.name = group.town + " " + group.name
            self.names[group.id] = group.name
        else:
            self.names[group.id] = group.name

        # Add missing positions to stakes
        self._find_missing_positions(stops)

        # Parse stakes — groups x9xxx are railway stations
        if group.id[1:3] in {"90", "91", "92"}:
            self._load_railway_group(group.id, group.name, stops)
        else:
            self._load_normal_group(group.name, stops)

    def get_id(self, original_id: Optional[str]) -> Optional[str]:
        """
        Should the stop_id be changed, provide the correct stop_id.
        If given stop_id has its position undefined returns None.
        """
        if original_id is None:
            return None
        valid_id = self.change.get(original_id, original_id)
        if valid_id is None:
            return None
        elif valid_id in self.invalid:
            # BUG FIX: the first branch used to also test `in self.invalid`,
            # which made this branch unreachable and left used_invalid
            # permanently empty (so missing_stops.json reported nothing).
            self.used_invalid.add(valid_id)
            return None
        else:
            return valid_id

    def use(self, stop_id: str) -> None:
        """Mark provided GTFS stop_id as used"""
        # Check if this stop belongs to a larger group
        parent_id = self.parents.get(stop_id)
        # Mark the parent as used
        if parent_id is not None:
            self.used.add(parent_id)
        self.used.add(stop_id)

    def zone_set(self, group_id: str, zone_id: str) -> None:
        """Saves assigned zone for a particular stop group"""
        current_zone = self.zones.get(group_id)

        # Zone has not changed: skip
        if current_zone == zone_id:
            return

        if current_zone is None:
            self.zones[group_id] = zone_id
        # Boundary stops shouldn't generate a zone conflict warning
        elif current_zone == "1/2" or zone_id == "1/2":
            self.zones[group_id] = "1/2"
        else:
            self.logger.warning(
                f"Stop group {group_id} has a zone confict: it was set to {current_zone!r}, "
                f"but now it needs to be set to {zone_id!r}"
            )
            self.zones[group_id] = "1/2"

    def export(self, gtfs_dir: str):
        """Exports all used stops (and their parents) to {gtfs_dir}/stops.txt"""
        # Export all stops
        self.logger.info("Exporting stops")
        with open(join(gtfs_dir, "stops.txt"), mode="w", encoding="utf8", newline="") as f:
            writer = csv.DictWriter(f, HEADERS["stops.txt"])
            writer.writeheader()

            for stop_id, stop_data in self.data.items():
                # Check if stop was used or (is a part of a used station
                # and not a station itself)
                if stop_id in self.used or (stop_data.get("parent_station") in self.used
                                            and stop_data.get("location_type") != "1"):
                    # Set the zone_id (group id is the first 4 chars of the stop id)
                    if not stop_data.get("zone_id"):
                        zone_id = self.zones.get(stop_id[:4])
                        if zone_id is None:
                            self.logger.warning(
                                f"Stop group {stop_id[:4]} has no zone_id assigned (using '1/2')"
                            )
                            zone_id = "1/2"
                        stop_data["zone_id"] = zone_id
                    writer.writerow(stop_data)

        # Calculate unused entries from missing_stops.json
        unused_missing = set(self.missing_stops.keys()).difference(self.used_invalid)

        # Dump missing stops info
        self.logger.info("Exporting missing_stops.json")
        with open("missing_stops.json", "w") as f:
            json.dump(
                {"missing": sorted(self.used_invalid), "unused": sorted(unused_missing)},
                f,
                indent=2
            )
| {
"repo_name": "MKuranowski/WarsawGTFS",
"path": "static/converter/stophandler.py",
"copies": "1",
"size": "14868",
"license": "mit",
"hash": 3687844764460532700,
"line_mean": 35.515970516,
"line_max": 97,
"alpha_frac": 0.5389584174,
"autogenerated": false,
"ratio": 3.7606275303643724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9796952131868353,
"avg_score": 0.0005267631792037006,
"num_lines": 407
} |
from functools import lru_cache
from math import ceil, floor, log2
from .utils import argmin, greatest_pow2
@lru_cache(maxsize=None)
def C(p, q):
    """Ballot-style recurrence: C(0, q) = 1, C(p, q) = 0 for p > q,
    otherwise C(p, q - 1) + C(p - 1, q). Memoized."""
    if p == 0:
        return 1
    return 0 if p > q else C(p, q - 1) + C(p - 1, q)
class RMQ:
    """Range-minimum-query structure over a static array.

    Splits the array into small blocks, keeps a doubling (sparse) table of
    block minima, and shares per-block lookup tables between blocks with the
    same Cartesian-tree signature (computed via the ballot numbers C) —
    a Fischer–Heun-style construction.
    """

    def __init__(self, array):
        self.array = array
        self.block_size = ceil(log2(len(array)) / 4)
        self.block_cnt = ceil(len(self.array) / self.block_size)
        self.block_mins = self._calculate_block_mins()
        self.processed_block_mins = self._process_block_mins()
        # signature -> per-block RMQ lookup table (shared between blocks)
        self.rmq_map = dict()
        self.signatures = self._compute_signatures()

    def _absolute_pos(self, block, index):
        # Index into self.array for position `index` of block `block`.
        return self.block_size * block + index

    def _block_element(self, block, index):
        return self.array[self._absolute_pos(block, index)]

    def _max_element_index(self, block):
        # The last block may be shorter than block_size.
        return min(self.block_size, len(self.array) - self.block_size * block)

    def _block_items(self, block, start=0, end=None):
        if end is None:
            end = self._max_element_index(block)
        return (
            self._block_element(block, i) for i in range(start, end)
        )

    def _get_block(self, i):
        # Map an array index to (block number, offset within block).
        return i // self.block_size, i % self.block_size

    def _calculate_block_mins(self):
        # Absolute index of the minimum element of each block.
        return [
            self._absolute_pos(b, argmin(self._block_items(b)))
            for b in range(self.block_cnt)
        ]

    def _process_block_mins(self):
        # Sparse table: res[k][i] is the position of the minimum among
        # blocks i .. i + 2**k - 1 (rows shrink-pad with the tail as-is).
        max_size = floor(log2(len(self.block_mins)))
        res = [[i for i in self.block_mins]]

        def global_argmin(*sub_blocks):
            return sub_blocks[argmin(self.array[i] for i in sub_blocks)]

        for si in range(max_size):
            t = [
                global_argmin(res[si][i], res[si][i + 2**si]) for i in range(len(self.block_mins) - 2**si)
            ] + [
                res[si][i] for i in range(len(self.block_mins) - 2**si, len(self.block_mins))
            ]
            res.append(t)
        return res

    def _preprocess_min(self, b):
        # rmq[i][j] = offset of the minimum within block b over [i, j].
        rmq = list()
        for i in range(self._max_element_index(b)):
            m = self._block_element(b, i)
            p = i
            q = dict()
            for j in range(i, self._max_element_index(b)):
                if self._block_element(b, j) < m:
                    m = self._block_element(b, j)
                    p = j
                q[j] = p
            rmq.append(q)
        return rmq

    def _signature(self, b):
        # Encode block b's Cartesian-tree shape as an integer using a
        # monotonic stack; blocks with equal signatures share one table.
        sgn = 0
        r = []
        sz = self._max_element_index(b)
        for i, v in enumerate(self._block_items(b)):
            while len(r) > 0 and r[-1] > v:
                sgn += C(sz - i - 1, sz - i + len(r))
                r.pop()
            r.append(v)
        if sgn not in self.rmq_map:
            self.rmq_map[sgn] = self._preprocess_min(b)
        return sgn

    def _compute_signatures(self):
        return [self._signature(b) for b in range(self.block_cnt)]

    def _query_whole_blocks(self, bi, bj):
        # Minimum position over whole blocks bi .. bj-1 via two
        # overlapping power-of-two spans from the sparse table.
        cnt = floor(log2(bj - bi))
        sub_blocks = [
            self.processed_block_mins[cnt][bi],
            self.processed_block_mins[cnt][bj - 2 ** cnt],
        ]
        return sub_blocks[argmin(self.array[i] for i in sub_blocks)]

    def _query_partial_block(self, b, i=0, j=None):
        # Minimum position inside block b over offsets [i, j).
        if j is None:
            j = self._max_element_index(b)
        rmq = self.rmq_map[self.signatures[b]]
        return self._absolute_pos(b, rmq[i][j - 1])

    def query(self, i, j):
        """Return the minimum value on the half-open range [i, j)."""
        pos = self.query_pos(i, j)
        if pos is not None:
            return self.array[pos]

    def query_pos(self, i, j):
        """Return the index of the minimum on [i, j); None when i >= j."""
        if i >= j:
            return None
        bi, pi = self._get_block(i)
        bj, pj = self._get_block(j)
        m_pos = None
        if bi == bj:
            # Both endpoints fall inside one block.
            return self._query_partial_block(bi, i=pi, j=pj)
        if pi:
            bi += 1  # left block is partial; handled separately below
        if pj:
            m_pos = self._query_partial_block(bj, j=pj)
        if bi < bj:
            mb = self._query_whole_blocks(bi, bj)
            if m_pos is None or self.array[mb] <= self.array[m_pos]:
                m_pos = mb
        if pi:
            mi = self._query_partial_block(bi - 1, i=pi)
            if m_pos is None or self.array[mi] <= self.array[m_pos]:
                m_pos = mi
        return m_pos
| {
"repo_name": "mhozza/string_algorithms",
"path": "string_algorithms/rmq.py",
"copies": "1",
"size": "4323",
"license": "mit",
"hash": 5300163592047080000,
"line_mean": 30.3260869565,
"line_max": 106,
"alpha_frac": 0.5149201943,
"autogenerated": false,
"ratio": 3.2774829416224414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4292403135922441,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from math import floor, log
from typing import Dict
# Precomputed counts of integers in [0, key] whose binary representation has
# no two consecutive ones. NOTE: the literal key 2 appears in both sections
# below with the same value (3), so the duplicate is harmless.
d = {
    # generate_d
    0: 1,
    1: 2,
    2: 3,
    4: 4,
    8: 6,
    16: 9,
    32: 14,
    64: 22,
    128: 35,
    256: 56,
    512: 90,
    1024: 145,
    2048: 234,
    4096: 378,
    8192: 611,
    16384: 988,
    32768: 1598,
    65536: 2585,
    131072: 4182,
    262144: 6766,
    524288: 10947,
    1048576: 17712,
    2097152: 28658,
    4194304: 46369,
    8388608: 75026,
    16777216: 121394,
    33554432: 196419,
    67108864: 317812,
    134217728: 514230,
    268435456: 832041,
    536870912: 1346270,
    # generate_d101x
    2: 3,
    5: 5,
    11: 8,
    23: 13,
    47: 21,
    95: 34,
    191: 55,
    383: 89,
    767: 144,
    1535: 233,
    3071: 377,
    6143: 610,
    12287: 987,
    24575: 1597,
    49151: 2584,
    98303: 4181,
    196607: 6765,
    393215: 10946,
    786431: 17711,
    1572863: 28657,
    3145727: 46368,
    6291455: 75025,
    12582911: 121393,
    25165823: 196418,
    50331647: 317811,
    100663295: 514229,
    201326591: 832040,
    402653183: 1346269,
}
# Currently unused scratch table.
b = {}
@lru_cache(maxsize=512)
def findIntegers(num: int) -> int:
    """Count integers in [0, num] with no consecutive ones in binary.

    Uses the precomputed table `d` when the exact key is present and
    falls back to brute force otherwise.
    """
    # BUG FIX: the original indexed d[num] for every num <= 9, raising
    # KeyError for 3, 6, 7 and 9 (keys absent from d), and for num > 9
    # ran a summation loop whose result was discarded before returning
    # brute_force(num). `d` values were generated by brute_force, so a
    # table hit is always consistent with the brute-force answer.
    if num in d:
        return d[num]
    return brute_force(num)
def brute_force(num: int, start=0) -> int:
    """Count integers in [start, num] whose binary form contains no "11"."""
    return sum("11" not in format(i, "b") for i in range(start, num + 1))
def closest_power_of_two(x):
    """Return floor(log2(x)) for positive x (the exponent, not the power).

    Uses int.bit_length() instead of floating-point logarithms, which can
    round the wrong way near exact powers of two for large inputs. All
    callers in this file pass positive integers.
    """
    return int(x).bit_length() - 1
def brute_force_with_d(num: int, d: Dict[int, int]) -> int:
    """Count integers in [0, num] without "11" in binary, seeded from `d`.

    `d` must map 2**k keys to their known counts; the remainder above the
    largest power of two <= num is brute-forced.
    """
    if f"{num:b}".startswith("11"):
        # Numbers whose binary form starts with "11" share the count of the
        # widest 10111...1 number of the same bit length, so clamp to it.
        # Before: 1458.925 vs 342.385
        # After: 1213.933 vs 251.989
        num_arr: List[int] = [1] * len(f"{num:b}")
        num_arr[1] = 0
        # FIX: loop variable renamed from `d`, which shadowed the table
        # parameter inside the generator expression (cf. try3's `digit`).
        num = sum(digit * 2 ** i for i, digit in enumerate(num_arr[::-1]))
    start = 2 ** closest_power_of_two(num)
    sol = d[start]
    sol += brute_force(num, start=start + 1)
    return sol
def try3(num: int, d: Dict[int, int]) -> int:
    """Like brute_force_with_d, but short-circuits when `num` itself is in `d`."""
    if f"{num:b}".startswith("11"):
        # Comparing with brute_force_with_d
        # 245.319 vs 162.209
        num_arr: List[int] = [1] * len(f"{num:b}")
        num_arr[1] = 0
        num = sum(digit * 2 ** i for i, digit in enumerate(num_arr[::-1]))
    if num in d:  # 10_111...111 or 2**i
        return d[num]
    # At this point, we have a 1011...111 number, but the 1-block at the end
    # has at least one zero in it
    start = 2 ** closest_power_of_two(num)
    sol = d[start]
    sol += brute_force(num, start=start + 1)
    return sol
def try4(num: int, d: Dict[int, int]) -> int:
    """Like try3, but recurses on the remainder instead of brute-forcing it."""
    if f"{num:b}".startswith("11"):
        # Comparing with try3
        # 162.209 vs
        num_arr: List[int] = [1] * len(f"{num:b}")
        num_arr[1] = 0
        # FIX: loop variable renamed from `d`, which shadowed the table
        # parameter inside the generator expression (cf. try3's `digit`).
        num = sum(digit * 2 ** i for i, digit in enumerate(num_arr[::-1]))
    if num in d:  # 10_111...111 or 2**i
        return d[num]
    # At this point, we have a 1011...111 number, but the 1-block at the end
    # has at least one zero in it
    #
    # We know the value for 1000...000, but we need the missing values from there
    # to 1011...111
    # This is the same as 0 to 11.111
    start = 2 ** closest_power_of_two(num)
    num_arr: List[int] = [int(digit) for digit in f"{num:b}"]
    num_arr[0] = 0
    # NOTE: assert is for this experiment's sanity checking only
    # (stripped under python -O).
    assert num_arr[1] == 0, "was not 0 at pos 1!"
    num = sum(digit * 2 ** i for i, digit in enumerate(num_arr[::-1]))
    return d[start] - 1 + try4(num, d)
def generate_d():
    """Print counts for powers of two as dict entries (used to build `d`)."""
    for i in range(30):
        num = 2 ** i
        val = brute_force(num)
        print(f"{num}: {val},")
def generate_d101x():
    """Print counts for 10111...1-shaped numbers as dict entries for `d`."""
    for i in range(2, 30):
        num_arr: List[int] = [1] * i
        num_arr[1] = 0
        # The generator expression's `i` is scoped to the expression and does
        # not clobber the outer loop variable.
        num = sum(digit * 2 ** i for i, digit in enumerate(num_arr[::-1]))
        val = brute_force_with_d(num, d)
        print(f"{num}: {val},")
def check(randvals=100):
    """Cross-check try3 against try4 on random inputs and compare timings."""
    import random
    import time
    random.seed(0)  # deterministic inputs for reproducible comparisons
    brute_time = 0
    impr_time = 0
    for i in range(randvals):
        num = random.randint(0, 10 ** 8 + 1)
        print(f"num={num:,}, next2={2**closest_power_of_two(num):,}")
        t0 = time.time()
        v1 = try3(num, d)
        t1 = time.time()
        v2 = try4(num, d)
        t2 = time.time()
        brute_time += t1 - t0
        impr_time += t2 - t1
        assert v1 == v2, f"for {num:,}: {v1:,} vs {v2:,}"
        print(f"{t1- t0:0.3f}s vs {t2-t1:0.3f}s")
    print(f"{brute_time:0.3f} vs {impr_time:0.3f}")


# Runs on import — this file is a standalone experiment script.
check()
## Hard:
# * 97_956_359
# * 96_597_127
# * 85_956_173
# * 48_056_573
# * 82_996_081
| {
"repo_name": "MartinThoma/algorithms",
"path": "Python/leetcode_600.py",
"copies": "1",
"size": "4681",
"license": "mit",
"hash": -1931442726737507000,
"line_mean": 22.405,
"line_max": 81,
"alpha_frac": 0.5176244392,
"autogenerated": false,
"ratio": 2.6902298850574713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8704771668430913,
"avg_score": 0.0006165311653116531,
"num_lines": 200
} |
from functools import lru_cache
from math import (
ceil,
sqrt,
)
from sequence import Sequence
class Prime(Sequence):
    """Lazily-grown sequence of prime numbers.

    Primes are cached in the shared class-level list ``_NUMS`` and the cache
    is extended on demand, so all instances (and classmethod callers) share
    the same growing cache.
    """
    # Shared cache of known primes, in ascending order.
    _NUMS = [2, 3]
    @classmethod
    def _append(cls):
        # Grow the cache a whole sieve batch at a time rather than one prime.
        cls._append_many()
    @classmethod
    def _append_one(cls):
        # Trial-division search for the single next prime after the current
        # largest cached prime.
        # NOTE(review): unused - _append delegates to _append_many instead.
        (candidate, flag) = (cls._NUMS[-1], False)
        while not flag:
            # Step by 2: every prime after 2 is odd, and _NUMS[-1] is odd here.
            (candidate, flag) = (candidate + 2, True)
            end = int(sqrt(candidate)) + 1
            for prime in cls._NUMS:
                if end < prime:
                    # No divisor <= sqrt(candidate) found: candidate is prime.
                    break
                if candidate % prime == 0:
                    flag = False
                    break
        cls._NUMS.append(candidate)
    @classmethod
    def _append_many(cls):
        # Sieve the half-open interval [largest + 1, 2 * largest + 1) using the
        # primes already cached; Bertrand's postulate guarantees at least one
        # new prime in that range, so the cache always grows.
        largest = cls._NUMS[-1]
        lower_bound = largest + 1
        upper_bound = largest * 2 + 1
        candidates = [True] * (upper_bound - lower_bound)
        for prime in cls._NUMS:
            # First multiple of `prime` at or above lower_bound.
            lowest_multiple = int(ceil(lower_bound / prime)) * prime
            for i in range(lowest_multiple, upper_bound, prime):
                candidates[i - lower_bound] = False
        for i in range(len(candidates)):
            if candidates[i]:
                cls._NUMS.append(i + lower_bound)
    @classmethod
    @lru_cache(maxsize=None)
    def contains(cls, n):
        """Return True if ``n`` is prime (results memoized per (cls, n))."""
        if n < 2:
            return False
        end = int(sqrt(n)) + 1
        # Make sure the cache covers every potential divisor up to sqrt(n).
        while cls._NUMS[-1] < end:
            cls._append()
        # gen_nums is provided by the Sequence base class (not visible here);
        # presumably it yields cached primes below `end` - TODO confirm.
        for p in cls.gen_nums(end):
            if n % p == 0:
                return False
        return True
| {
"repo_name": "mackorone/euler",
"path": "src/prime.py",
"copies": "1",
"size": "1519",
"license": "mit",
"hash": 6836271841193496000,
"line_mean": 26.125,
"line_max": 68,
"alpha_frac": 0.5075707702,
"autogenerated": false,
"ratio": 3.9454545454545453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4953025315654545,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from menpo.shape import PointCloud
from menpo.transform import Scale, Translation
from menpo3d.rasterize import (
rasterize_barycentric_coordinate_images,
rasterize_mesh_from_barycentric_coordinate_images,
rasterize_shape_image_from_barycentric_coordinate_images)
from menpodetect import load_dlib_frontal_face_detector
from menpofit.aam import load_balanced_frontal_face_fitter
from .camera import perspective_camera_for_template
from .data import load_template, LANDMARK_MASK
@lru_cache()
def load_fitter():
    # Memoized: loading the balanced AAM fitter is expensive, do it once.
    return load_balanced_frontal_face_fitter()
@lru_cache()
def load_detector():
    # Memoized: loading the dlib face detector is expensive, do it once.
    return load_dlib_frontal_face_detector()
def align_mesh_to_template(source, target, scale_corrective=1.2):
    """Build a transform mapping *source* roughly onto *target*.

    The transform recentres *source* onto *target* and rescales it by the
    ratio of their norms (times a corrective factor), in that order.
    """
    factor = (target.norm() / source.norm()) * scale_corrective
    rescale = Scale(factor, n_dims=target.n_dims)
    recentre = Translation(target.centre() - source.centre())
    return recentre.compose_before(rescale)
def landmark_mesh(mesh, img_shape=(320, 240), verbose=False):
    """Locate facial landmarks on *mesh* by rendering it and fitting an AAM.

    Args:
        mesh: the 3D mesh to landmark.
        img_shape: shape of the rasterized image used for detection/fitting.
        verbose: print progress information.

    Returns:
        dict with 'landmarks_2d', 'occlusion_mask', 'landmarks_3d_masked'
        and 'landmarked_image' entries.

    Raises:
        ValueError: if not exactly one face is detected in the render.
    """
    # Fix: use the lru_cache'd loaders defined above - the original called
    # load_balanced_frontal_face_fitter()/load_dlib_frontal_face_detector()
    # directly, re-loading the expensive models on every call.
    fitter = load_fitter()
    detector = load_detector()
    camera = perspective_camera_for_template(img_shape)
    # 1. Pre-process - align the mesh roughly with the template
    aligned_mesh = align_mesh_to_template(mesh, load_template()).apply(mesh)
    mesh_in_img = camera.apply(aligned_mesh)
    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError(
            "Expected to find one face - found {}".format(len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AMM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape.from_mask(LANDMARK_MASK)
    # test to see if the landmark fell on the 3D surface or not
    occlusion_mask = img.mask.sample(img_lms).ravel()
    img.landmarks['__lsfm_on_surface'] = img_lms.from_mask(occlusion_mask)
    img.landmarks['__lsfm_off_surface'] = img_lms.from_mask(~occlusion_mask)
    return_dict = {
        'landmarks_2d': img_lms,
        'occlusion_mask': occlusion_mask,
        'landmarks_3d_masked': PointCloud(shape_img.sample(
            img.landmarks['__lsfm_on_surface']).T)
    }
    # Off-surface landmarks get their own (red) annotation layer when present.
    if (~occlusion_mask).sum() != 0:
        groups = ['dlib_0', '__lsfm_on_surface', '__lsfm_off_surface']
        marker_edge_colours = ['blue', 'yellow', 'red']
    else:
        groups = ['dlib_0', '__lsfm_on_surface']
        marker_edge_colours = ['blue', 'yellow']
    lm_img = img.rasterize_landmarks(group=groups,
                                     line_colour='blue',
                                     marker_edge_colour=marker_edge_colours)
    return_dict['landmarked_image'] = lm_img
    return return_dict
| {
"repo_name": "menpo/lsfm",
"path": "lsfm/landmark.py",
"copies": "1",
"size": "3169",
"license": "bsd-3-clause",
"hash": -2888775015307180000,
"line_mean": 36.2823529412,
"line_max": 78,
"alpha_frac": 0.6667718523,
"autogenerated": false,
"ratio": 3.3252885624344177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44920604147344173,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from metrics import ratio_of_classes
from numpy import log2
from typing import Callable, Mapping, Sequence, List
from warnings import warn
import pandas as pd
class Gene:
    """Stores gene's identifier and description (multiton).
    At a time there can be only one gene with given identifier,
    i.e. after the first initialization, all subsequent attempts
    to initialize a gene with the same identifier will return
    exactly the same object. This is so called multiton pattern.
    Example:
        >>> x = Gene('TP53')
        >>> y = Gene('TP53')
        >>> assert x is y # passes, there is only one gene
    """
    # Registry of every Gene created so far, keyed by name.
    instances = {}
    __slots__ = ('name', 'description', 'id')
    def __new__(cls, *args, **kwargs):
        if not args:
            # for pickling the requirements are lessened
            # ONLY for pickling
            return super(Gene, cls).__new__(cls)
        name = args[0]
        if name not in cls.instances:
            gene = super(Gene, cls).__new__(cls)
            gene.__init__(*args, **kwargs)
            # Sequential 0-based id. Fixed off-by-one: the new gene is not yet
            # registered here, so len(cls.instances) is the correct next id
            # (the original `len(cls.instances) - 1` gave the first gene -1).
            gene.id = len(cls.instances)
            cls.instances[name] = gene
        return cls.instances[name]
    def __init__(self, name, description=None):
        # NOTE: __init__ also runs on repeated Gene(name) lookups, so a later
        # call with a different (or absent) description overwrites the stored
        # one - behaviour kept as-is.
        self.name = name
        self.description = description
    def __repr__(self):
        return f'<Gene: {self.name}>'
class Sample:
    """Holds a mapping of genes to expression values for one named sample."""
    def __init__(self, name, data: Mapping[Gene, float]):
        self.name = name
        self.data = data
    @property
    def genes(self):
        """View of all genes measured in this sample."""
        return self.data.keys()
    @classmethod
    def from_names(cls, name, data: Mapping[str, float]):
        """Create a sample from a gene_name: value mapping.
        Args:
            name: name of sample
            data: mapping (e.g. dict) where keys represent gene names
        """
        by_gene = {Gene(gene_name): value for gene_name, value in data.items()}
        return cls(name, by_gene)
    @classmethod
    def from_array(cls, name, panda_series: pd.Series, descriptions=False):
        """Create a sample from pd.Series or equivalent.
        Args:
            name: name of the sample
            panda_series:
                series object where columns represent values of genes and
                names are either gene identifiers or tuples:
                ``(gene_identifier, description)``
            descriptions:
                are descriptions present in names of the series object?
        """
        def make_gene(key):
            # With descriptions, index entries are (identifier, description).
            return Gene(*key) if descriptions else Gene(key)
        return cls(name, {
            make_gene(key): value
            for key, value in panda_series.to_dict().items()
        })
    def as_array(self):
        """
        Returns:
            one-dimensional labeled array with Gene objects as labels
        """
        return pd.Series(self.data)
    def __eq__(self, other):
        return (self.name, self.data) == (other.name, other.data)
    def __repr__(self):
        return f'<Sample "{self.name}" with {len(self.data)} genes>'
    def exclude_genes(self, gene_list: list):
        """Drop every gene in *gene_list* from this sample's data in place."""
        for gene in gene_list:
            assert isinstance(gene, Gene)
            if gene in self.data:
                del self.data[gene]
def first_line(file_object, skip_rows=0):
    """Return the (skip_rows + 1)-th non-empty line, rewinding the file after.

    Args:
        file_object: a readable, seekable text file object.
        skip_rows: how many lines to skip before the returned one.

    Returns:
        The selected line (including its newline), or '' if the file ends
        before enough lines are read.
    """
    line = None
    while not (line and skip_rows < 0):
        line = file_object.readline()
        if line:
            skip_rows -= 1
        else:
            # Fix: readline() returns '' at EOF forever; the original looped
            # infinitely on an empty/too-short file. Bail out instead.
            break
    # return to the beginning
    file_object.seek(0)
    return line
# TODO class variable with set of genes + method(s) for checking data integrity
class SampleCollection:
    """A collection of samples of common origin or characteristic.
    An example sample_collection can be:
    (Breast_cancer_sample_1, Breast_cancer_sample_2) named "Breast cancer".
    The common origin/characteristics for "Breast cancer" sample_collection could be
    "a breast tumour", though samples had been collected from two donors.
    Another example are controls:
    (Control_sample_1, Control_sample_2) named "Control".
    The common characteristic for these samples is that both are controls.
    """
    def __init__(self, name: str, samples=None):
        self.samples: List[Sample] = samples or []
        self.name = name
        # integrity check
        # Raises AssertionError if there is inconsistency in genes in samples.
        # genes = self.samples[0].genes
        # assert all(sample.genes == genes for sample in self.samples[1:])
    @property
    def labels(self):
        # Names of the contained samples, in order.
        return [sample.name for sample in self.samples]
    @property
    @lru_cache(maxsize=1)
    def genes(self):
        """
        Returns:
            all genes present in the collection of samples.
        """
        # NOTE(review): lru_cache on an instance method keeps the instance
        # alive, and maxsize=1 means the cache is shared across instances -
        # a second collection evicts the first. Consider cached_property.
        genes = self.samples[0].genes
        return genes
    @lru_cache(maxsize=None)
    def of_gene(self, gene):
        # Expression values of `gene` across all samples, in sample order.
        # NOTE(review): unbounded lru_cache on a method retains `self` and
        # every queried gene for the process lifetime.
        return tuple(
            sample.data[gene]
            for sample in self.samples
        )
    def as_array(self):
        """
        Returns:
            `pandas.DataFrame`: two-dimensional labeled array with Gene objects as row labels,
            storing data from all samples
        """
        df = pd.DataFrame()
        for sample in self.samples:
            if df.empty:
                # First sample defines the row index (Gene labels).
                df = sample.as_array().to_frame(sample.name)
            else:
                kwargs = {sample.name: sample.as_array().values}
                df = df.assign(**kwargs)
        return df
    def __add__(self, other):
        # Merged collection keeps the left operand's name.
        return SampleCollection(self.name, self.samples + other.samples)
    @classmethod
    def from_file(
        cls, name, file_object,
        columns_selector: Callable[[Sequence[int]], Sequence[int]] = None,
        samples=None, delimiter: str = '\t', index_col: int = 0,
        use_header=True, reverse_selection=False, prefix=None,
        header_line=0, description_column=None
    ):
        """Create a sample_collection (collection of samples) from csv/tsv file.
        Args:
            name:
                a name of the sample_collection (or group of samples) which will
                identify it (like "Tumour_1" or "Control_in_20_degrees")
            file_object: a file (containing gene expression)
                of the following structure:
                - names of samples separated by a tab in the first row,
                - gene symbol/name followed by gene expression values
                for every sample in remaining rows;
                an additional column "description" is allowed between genes
                column and sample columns, though it has to be explicitly
                declared with `description_column` argument.
            columns_selector:
                a function which will select (and return) a subset of
                provided column identifiers (do not use with `samples`)
            samples:
                a list of names of samples to extract from the file
                (do not use with `columns_selector`)
            reverse_selection:
                if you want to use all columns but the selected ones
                (or all samples but the selected) set this to True
            delimiter: the delimiter of the columns
            index_col: column to use as the gene names
            use_header: does the file have a header?
            prefix: prefix for custom samples naming schema
            header_line: number of non-empty line with sample names
                None - do not use, 0 - use first row
            description_column:
                is column with description of present in the file
                (on the second position, after gene identifiers)?
        """
        if file_object.tell() != 0:
            # NOTE(review): warns and then raises a bare Exception - a more
            # specific exception type would be friendlier to callers.
            warn(f'Passed file object: {file_object} was read before.')
            raise Exception()
        line = first_line(file_object, header_line or 0)
        header_items = [item.strip() for item in line.split('\t')]
        gene_columns = [index_col]
        if description_column:
            # The description column is assumed to sit right after genes.
            description_column = 1
            gene_columns.append(description_column)
        else:
            if any('description' == name.lower() for name in header_items):
                warn(
                    'First line of your file contains "description" column, '
                    'but you did not provide "--description_column" argument.'
                )
        # a reasonable assumption is that the columns with samples
        # start after columns with gene symbol and gene description
        column_shift = max(gene_columns) + 1
        if columns_selector:
            # sniff how many columns do we have in the file
            columns_count = line.count(delimiter)
            all_sample_columns = list(range(column_shift, columns_count + 1))
            # generate identifiers (numbers) for all columns
            # and take the requested subset
            columns = columns_selector(all_sample_columns)
            if reverse_selection:
                columns = list(columns)
                columns = [c for c in all_sample_columns if c not in columns]
            # https://github.com/pandas-dev/pandas/issues/9098#issuecomment-333677100
            columns = gene_columns + list(columns)
        else:
            columns = None
        if not use_header:
            if samples:
                raise ValueError(
                    'To select samples by their name, you need a file with '
                    'samples names in the header. If you use such file, '
                    'please set `use_header=True`, otherwise skip `samples` '
                    'in your arguments.'
                )
            if header_line:
                warn(
                    '`header_line` has no effect when '
                    '`use_header` is set to `False`'
                )
        # we could leave it to pandas, but it shows an ugly,
        # not very helpful message. It is better to show the
        # user where exactly the problem occurs.
        if samples:
            available_samples = [
                name
                for name in header_items[column_shift:]
            ]
            lacking_samples = set(samples) - set(available_samples)
            if lacking_samples:
                raise ValueError(
                    f'Samples {lacking_samples} are not available in {file_object.name} file.\n'
                    f'Following samples were found: {", ".join(available_samples)}.'
                )
            if index_col:
                # TODO https://github.com/pandas-dev/pandas/issues/9098
                warn(
                    'Using "samples" with "index_col" 0 may cause an '
                    'unexpected behaviour due to an upstream issue in '
                    'pandas package (pandas-dev/pandas/issues/9098) '
                    'for pandas in versions older than 0.21.'
                )
            additional_column_names = [
                header_items[index]
                for index in gene_columns
            ]
            # https://github.com/pandas-dev/pandas/issues/9098#issuecomment-333677100
            samples = additional_column_names + list(samples)
        # just to reassure that the pointer is on the beginning
        if file_object.tell() != 0:
            warn('Passed file object was read before.')
        if samples and columns:
            warn(
                'Please, provide either columns or samples, '
                'not both. We will use columns this time.'
            )
        try:
            data = pd.read_table(
                file_object,
                delimiter=delimiter,
                # None - do not use, 0 - use first row
                header=header_line if use_header else None,
                index_col=gene_columns,
                usecols=columns or samples,
                prefix=f'{prefix}_' if prefix else ''
            )
        except Exception as e:
            from traceback import print_tb
            from traceback import print_stack
            # NOTE(review): print_tb expects a traceback object, not the
            # exception - likely meant print_tb(e.__traceback__). Also, the
            # exception is swallowed here, so `data` below is unbound and the
            # next line raises NameError; consider re-raising instead.
            print_tb(e)
            print(e)
        descriptions = description_column is not None
        samples = [
            Sample.from_array(sample_name, sample_data, descriptions=descriptions)
            for sample_name, sample_data in data.items()
        ]
        return cls(name, samples)
    @classmethod
    def from_gct_file(cls, name, file_object, **kwargs):
        """Parse file in Gene Cluster Text file format, as defined on:
        software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats
        User is allowed to provide settings different from the standard.
        """
        version = file_object.readline()
        rows_count, samples_count = map(int, file_object.readline().split('\t'))
        default_values = {
            'description_column': True,
            'header_line': 2
        }
        if version != '#1.2\n':
            warn('Unsupported version of GCT file')
        # Rewind: from_file requires the pointer at the start of the file.
        file_object.seek(0)
        for key, value in default_values.items():
            kwargs[key] = value
        self = cls.from_file(
            name, file_object,
            **kwargs
        )
        # if user did not choose a subset of samples
        if not any(key in kwargs for key in ['samples', 'columns_selector']):
            # check if the samples numbers are ok
            if len(self.samples) != samples_count:
                warn(
                    f'Samples count ({len(self.samples)}) '
                    f'does not match with the {samples_count} '
                    f'declared in {name} file.'
                )
            if rows_count != len(self.samples[0].genes):
                warn(
                    f'Number of rows ({rows_count}) does not match '
                    f'with the {len(self.samples[0].genes)} '
                    f'declared in {name} file'
                )
        return self
    @classmethod
    def from_csv_file(cls, name, file_object, **kwargs):
        # Convenience wrapper: defaults the delimiter to a comma.
        if 'delimiter' in kwargs:
            if kwargs['delimiter'] != ',':
                warn(
                    'You are using not comma delimiter for what looks like csv file. '
                    'Is this really the thing you want to do?'
                )
        else:
            kwargs['delimiter'] = ','
        return cls.from_file(name, file_object, **kwargs)
    def exclude_genes(self, gene_list: list):
        # Remove the given genes from every sample in the collection.
        for sample in self.samples:
            sample.exclude_genes(gene_list)
# TODO class variable with set of genes + method(s) for checking data integrity
class Experiment:
    """
    Stores all user's experiment data.
    """
    def __init__(self, case: SampleCollection, control: SampleCollection):
        self.control = control
        self.case = case
    def get_all(self):
        """Return control and case merged into a single collection."""
        return self.control + self.case
    def calculate_fold_change(self):
        """
        Returns:
            `pandas.DataFrame` object: two-dimensional labeled array with Gene objects as row labels, storing
            fold change and log transformed fold change values for every gene - fold change of the expression
            level of given gene in the sample under study to the normal level (average in a control group)
        """
        combined = self.get_all().as_array()
        rows = {}
        for gene, expressions in combined.iterrows():
            control_values = [expressions[label] for label in self.control.labels]
            case_values = [expressions[label] for label in self.case.labels]
            fold = ratio_of_classes(case_values, control_values)
            rows[gene] = [fold, log2(fold)]
        return pd.DataFrame.from_dict(rows, orient="index", columns=['FC', 'logFC'])
    def exclude_genes(self, gene_list: list):
        """Drop the given genes from every sample in the experiment."""
        self.get_all().exclude_genes(gene_list)
| {
"repo_name": "sienkie/pathways-analysis",
"path": "models.py",
"copies": "1",
"size": "15802",
"license": "mit",
"hash": -1136555631575083800,
"line_mean": 32.9827956989,
"line_max": 109,
"alpha_frac": 0.5651183395,
"autogenerated": false,
"ratio": 4.457545839210155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5522664178710155,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from operator import attrgetter
from os.path import sep, splitext, isdir, exists, join
from os import listdir
from urllib.parse import quote_plus
import ipgetter
from logging import getLogger
from discord import ChannelType
from roboto import config, disc
log = getLogger(__name__)
# Continue through the media list via incrementing media_idx
# (module-level playback flag).
media_continuous = True
class MediaFile(object):
    """One playable file, addressed by its path relative to the music root."""
    def __init__(self, path, idx=0):
        self.path = path
        self.idx = idx
    def name(self):
        """File name: the final component of the path."""
        return self.path.split(sep)[-1]
    @property
    def is_dir(self):
        """True when the path has a directory component in front of the file."""
        return len(self.path.split(sep)) > 1
    @property
    def safe_path(self):
        """URL-quoted form of the path, safe to embed in a link."""
        return quote_plus(self.path)
# Recognised music file extensions (lowercase).
valid_music_ext = {'.flac', '.mp3'}
def is_media_file(path):
    """True when *path* carries a recognised music extension (case-insensitive)."""
    try:
        extension = splitext(path)[1]
    except IndexError:
        return False
    return extension.lower() in valid_music_ext
async def send_now_playing(server_id, channel_id=None):
    """Announce the currently playing track in a text channel.

    Sends to ``channel_id`` when given and resolvable; otherwise falls back
    to the first non-voice channel of the server. Does nothing when no track
    path can be resolved for the server's current song.
    """
    from roboto.state import servers
    server_state = await servers.get_server(server_id)
    title = find_song_path(server_state.song_id)
    if not title:
        log.warning("No title for now playing")
        return
    msg = "Now Playing: {}".format(title)
    chan = None
    if channel_id:
        chan = disc.dc.get_channel(channel_id)
    if chan:
        await disc.dc.send_message(chan, content=msg)
    else:
        if channel_id:
            # A channel was requested but could not be resolved.
            log.warning("Failed to find requested channel for NP")
        server = disc.dc.get_server(server_id)
        # Fall back to the first non-voice channel.
        for channel in server.channels:
            if not channel.type == ChannelType.voice:
                await disc.dc.send_message(channel, content=msg)
                break
def find_song_path(song_id, full=False):
    """Map a song index to its path, or None when the id cannot be resolved.

    With ``full=True`` the path is joined onto the configured music root.
    NOTE(review): a falsy song_id (including 0) is treated as missing, so the
    first track in the list is unreachable here - confirm this is intended.
    """
    if not song_id:
        return None
    media = fetch_media_files()
    try:
        entry = media[int(song_id)]
    except IndexError:
        return None
    if full:
        return join(config['music_path'], entry.path)
    return entry.path
@lru_cache(maxsize=None)
def fetch_media_files(path=None):
    """ Build a simple list of valid media files. This function assumes you have a shallow dir tree
    no deeper than 1 folder.
    :param path:
    :return: []MediaFile
    """
    if path is None:
        try:
            path = config["music_path"]
        except KeyError:
            return []
    collected = []
    if exists(path) and isdir(path):
        for entry in listdir(path):
            entry_path = join(path, entry)
            if isdir(entry_path):
                # One level of nesting: scan the folder's own files.
                collected.extend(
                    MediaFile(join(entry, nested))
                    for nested in listdir(entry_path)
                    if is_media_file(nested)
                )
            elif is_media_file(entry):
                collected.append(MediaFile(entry))
    collected.sort(key=attrgetter("path"))
    # Assign indices *after* sorting so ids match list positions.
    for position, media in enumerate(collected, start=0):
        media.song_id = position
    return collected
def after_media_handler(player):
    """Callback fired by the media player when playback finishes.

    Auto-advance to the next track is currently disabled, so this is an
    intentional no-op.
    """
    pass
def play_next(server_state):
    """Advance to the following track; False when no track is current."""
    if server_state.song_id is None:
        return False
    return play_file(server_state, server_state.song_id + 1)
def play_file(server_state, song_id: int):
    """Start playback of the media file at index *song_id* for this server.

    Any currently active player is stopped first. Returns True on success,
    or None when the server has no attached voice client.
    """
    if not server_state.voice_client:
        return
    old_media_player = server_state.get_media_player()
    if old_media_player:
        # Fix: the original also called is_playing() here and discarded the
        # result - a dead statement. Stop unconditionally, as before.
        old_media_player.stop()
    avcon = config.get_bool("use_avcon", False)
    full_path = find_song_path(song_id, full=True)
    media_player = server_state.voice_client.create_ffmpeg_player(
        full_path, use_avconv=avcon, after=after_media_handler)
    media_player.volume = 1.0
    media_player.start()
    server_state.set_active_media_player(media_player)
    server_state.song_id = int(song_id)
    return True
def music_set_vol(player, vol):
    """Set the player's volume (coerced to float); no-op when player is falsy."""
    if not player:
        return
    player.volume = float(vol)
def music_stop(server):
    """Stop and clear the server's active media player.

    Returns True when a player was present (now stopped and detached),
    False when there was nothing to stop.
    """
    player = server.get_media_player()
    if not player:
        return False
    if player.is_playing():
        player.stop()
    if player.is_alive():
        player.join(timeout=5)
    server.set_active_media_player(None)
    return True
ext_ip = None
async def play_youtube(server_state, url, volume=0.5):
    """Stop current playback and stream a YouTube URL; returns the track title."""
    music_stop(server_state)
    media_player = await server_state.voice_client.create_ytdl_player(url, use_avconv=False)
    music_set_vol(media_player, volume)
    media_player.start()
    # Keep a back-reference so the player can reach its server state later.
    media_player.server_state = server_state
    return media_player.title
@lru_cache(maxsize=None)
def music_playlist_url(server_id):
    """Build the per-server playlist URL using this host's external IP.

    NOTE(review): the result is cached per server_id, so if the first IP
    lookup fails, the "0.0.0.0" fallback URL is cached for the process
    lifetime - confirm this is acceptable.
    """
    global ext_ip
    try:
        if not ext_ip:
            ext_ip = ipgetter.myip()
    except Exception:
        # Best-effort fallback when the external IP cannot be determined.
        ext_ip = "0.0.0.0"
    return "http://{}:{}/{}".format(ext_ip, config.get("http_port", 8080), server_id)
| {
"repo_name": "leighmacdonald/roboto",
"path": "roboto/media.py",
"copies": "1",
"size": "5168",
"license": "mit",
"hash": 6970096636813686000,
"line_mean": 26.2,
"line_max": 99,
"alpha_frac": 0.6170665635,
"autogenerated": false,
"ratio": 3.5715272978576365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9684350097794201,
"avg_score": 0.0008487527126869358,
"num_lines": 190
} |
from functools import lru_cache
from pkg_resources import iter_entry_points
from typing import (
Any,
List,
Mapping,
Type,
)
from marshmallow.fields import Field
from microcosm_flask.swagger.parameters.base import ParameterBuilder
from microcosm_flask.swagger.parameters.default import DefaultParameterBuilder
ENTRY_POINT = "microcosm_flask.swagger.parameters"
class Parameters:
    """
    Plugin-aware swagger parameter builder.
    Discovers builder subclasses via the `microcosm_flask.swagger.parameters` entry point
    and delegates to the first compatible implementation.
    """
    def __init__(self, strict_enums: bool = True):
        self.strict_enums = strict_enums
    def build(self, field: Field) -> Mapping[str, Any]:
        """
        Build a swagger parameter from a marshmallow field.
        """
        # Default builder goes last so discovered plugins take precedence.
        candidate_types = self.builder_types() + [self.default_builder_type()]
        candidates: List[ParameterBuilder] = []
        for candidate_type in candidate_types:
            candidates.append(candidate_type(
                build_parameter=self.build,  # type: ignore
                strict_enums=self.strict_enums,
            ))
        chosen = next(
            candidate
            for candidate in candidates
            if candidate.supports_field(field)
        )
        return chosen.build(field)
    @classmethod
    # NB: entry point lookups can be slow; memoize
    @lru_cache()
    def builder_types(cls) -> List[Type[ParameterBuilder]]:
        """
        Define the available builder types.
        """
        discovered = []
        for entry_point in iter_entry_points(ENTRY_POINT):
            discovered.append(entry_point.load())
        return discovered
    @classmethod
    def default_builder_type(cls) -> Type[ParameterBuilder]:
        """
        Define the default builder type.
        """
        return DefaultParameterBuilder
| {
"repo_name": "globality-corp/microcosm-flask",
"path": "microcosm_flask/swagger/parameters/__init__.py",
"copies": "1",
"size": "1917",
"license": "apache-2.0",
"hash": -5411442114039876000,
"line_mean": 24.56,
"line_max": 89,
"alpha_frac": 0.6155451226,
"autogenerated": false,
"ratio": 4.406896551724138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014814814814814815,
"num_lines": 75
} |
from functools import lru_cache
from pyramid.security import (
ALL_PERMISSIONS,
Allow,
Authenticated,
Deny,
DENY_ALL,
Everyone,
)
from pyramid.threadlocal import get_current_request
from pyramid.traversal import (
find_root,
traverse,
)
import contentbase
from ..schema_formats import is_accession
@lru_cache()
def _award_viewing_group(award_uuid, root):
    # Cached lookup of an award's viewing_group.
    # NOTE(review): the cache keys on (award_uuid, root), so it retains the
    # root object for the process lifetime - confirm this is intended.
    award = root.get_by_uuid(award_uuid)
    return award.upgrade_properties().get('viewing_group')
# Anyone, including anonymous users, may view.
ALLOW_EVERYONE_VIEW = [
    (Allow, Everyone, 'view'),
]
# Members of the submitter group may add items.
ALLOW_SUBMITTER_ADD = [
    (Allow, 'group.submitter', 'add')
]
# Only members of the matching viewing group may view.
ALLOW_VIEWING_GROUP_VIEW = [
    (Allow, 'role.viewing_group_member', 'view'),
]
# Viewing group may view; admins and the owning lab's submitters may edit.
ALLOW_LAB_SUBMITTER_EDIT = [
    (Allow, 'role.viewing_group_member', 'view'),
    (Allow, 'group.admin', 'edit'),
    (Allow, 'role.lab_submitter', 'edit'),
]
# Any authenticated user may view; admins may edit.
ALLOW_CURRENT = [
    (Allow, Authenticated, 'view'),
    (Allow, 'group.admin', 'edit'),
]
# Admin-only access; everyone else is denied by the trailing DENY_ALL.
ONLY_ADMIN_VIEW = [
    (Allow, 'group.admin', ALL_PERMISSIONS),
    (Allow, 'group.read-only-admin', ['view']),
    # Avoid schema validation errors during audit
    (Allow, 'remoteuser.EMBED', ['view', 'expand', 'audit', 'import_items']),
    (Allow, 'remoteuser.INDEXER', ['view', 'index']),
    DENY_ALL,
]
# Deleted items are hidden from edit listings and otherwise admin-only.
DELETED = [
    (Deny, Everyone, 'visible_for_edit')
] + ONLY_ADMIN_VIEW
def paths_filtered_by_status(request, paths, exclude=('deleted', 'replaced'), include=None):
    """Filter resource paths by their item status.

    When ``include`` is given, keep only paths whose status is in it;
    otherwise drop paths whose status is in ``exclude``.
    """
    def status_of(path):
        return traverse(request.root, path)['context'].__json__(request).get('status')
    if include is not None:
        return [path for path in paths if status_of(path) in include]
    return [path for path in paths if status_of(path) not in exclude]
class Collection(contentbase.Collection):
    """Collection that resolves members by name, accession or alias."""
    def __init__(self, *args, **kw):
        super(Item.Collection, self).__init__(*args, **kw)
        if hasattr(self, '__acl__'):
            return
        # XXX collections should be setup after all types are registered.
        # Don't access type_info.schema here as that precaches calculated schema too early.
        if 'lab' in self.type_info.factory.schema['properties']:
            self.__acl__ = ALLOW_SUBMITTER_ADD
    def get(self, name, default=None):
        """Look up a member by traversal name, then accession, then alias."""
        found = super(Collection, self).get(name, None)
        if found is not None:
            return found
        # Unique-key lookups that this name could plausibly match.
        keys = []
        if is_accession(name):
            keys.append('accession')
        if ':' in name:
            keys.append('alias')
        for key in keys:
            candidate = self.connection.get_by_unique_key(key, name)
            if candidate is None:
                continue
            # Only return items that actually belong to this collection.
            if candidate.collection is not self and candidate.__parent__ is not self:
                return default
            return candidate
        return default
class Item(contentbase.Item):
    # Collection class used for items of this type.
    Collection = Collection
    # Maps an item's 'status' property to the ACL governing it; statuses not
    # listed here fall back to ALLOW_LAB_SUBMITTER_EDIT (see __acl__).
    STATUS_ACL = {
        # standard_status
        'released': ALLOW_CURRENT,
        'deleted': DELETED,
        'replaced': DELETED,
        # shared_status
        'current': ALLOW_CURRENT,
        'disabled': ONLY_ADMIN_VIEW,
        # file
        'obsolete': ONLY_ADMIN_VIEW,
        # antibody_characterization
        'compliant': ALLOW_CURRENT,
        'not compliant': ALLOW_CURRENT,
        'not reviewed': ALLOW_CURRENT,
        'not submitted for review by lab': ALLOW_CURRENT,
        # antibody_lot
        'eligible for new data': ALLOW_CURRENT,
        'not eligible for new data': ALLOW_CURRENT,
        'not pursued': ALLOW_CURRENT,
        # dataset / experiment
        'release ready': ALLOW_VIEWING_GROUP_VIEW,
        'revoked': ALLOW_CURRENT,
        # publication
        'published': ALLOW_CURRENT,
    }
    @property
    def __name__(self):
        # Traversal name: the configured name_key property when available,
        # falling back to the uuid for unnamed or replaced items.
        if self.name_key is None:
            return self.uuid
        properties = self.upgrade_properties()
        if properties.get('status') == 'replaced':
            return self.uuid
        return properties.get(self.name_key, None) or self.uuid
    def __acl__(self):
        # Don't finalize to avoid validation here.
        properties = self.upgrade_properties().copy()
        status = properties.get('status')
        return self.STATUS_ACL.get(status, ALLOW_LAB_SUBMITTER_EDIT)
    def __ac_local_roles__(self):
        # Derive principal -> role mappings from the item's lab and award.
        roles = {}
        properties = self.upgrade_properties().copy()
        if 'lab' in properties:
            lab_submitters = 'submits_for.%s' % properties['lab']
            roles[lab_submitters] = 'role.lab_submitter'
        if 'award' in properties:
            viewing_group = _award_viewing_group(properties['award'], find_root(self))
            if viewing_group is not None:
                viewing_group_members = 'viewing_group.%s' % viewing_group
                roles[viewing_group_members] = 'role.viewing_group_member'
        return roles
    def unique_keys(self, properties):
        # Extend accession keys with alternate accessions; a replaced item
        # keeps only its alternates so the current item owns the accession.
        keys = super(Item, self).unique_keys(properties)
        if 'accession' not in self.schema['properties']:
            return keys
        keys.setdefault('accession', []).extend(properties.get('alternate_accessions', []))
        if properties.get('status') != 'replaced' and 'accession' in properties:
            keys['accession'].append(properties['accession'])
        return keys
class SharedItem(Item):
    ''' An Item visible to all authenticated users while "proposed" or "in progress".
    '''
    def __ac_local_roles__(self):
        # Same as Item's lab-submitter mapping, plus a blanket viewing-group
        # role for every authenticated principal.
        properties = self.upgrade_properties().copy()
        roles = {}
        if 'lab' in properties:
            roles['submits_for.%s' % properties['lab']] = 'role.lab_submitter'
        roles[Authenticated] = 'role.viewing_group_member'
        return roles
def contextless_has_permission(permission):
    """Check *permission* against the site root, ignoring the current context.

    Fix: the original ignored its ``permission`` argument and always tested
    'forms'. All existing callers pass 'forms', so behaviour is unchanged
    for them while the function now honours its parameter.
    """
    request = get_current_request()
    return request.has_permission(permission, request.root)
@contentbase.calculated_property(context=Item.Collection, category='action')
def add(item_uri, item_type, has_permission):
    """Expose an 'add' action when adding is permitted and forms are enabled."""
    if not (has_permission('add') and contextless_has_permission('forms')):
        return None
    return {
        'name': 'add',
        'title': 'Add',
        'profile': '/profiles/{item_type}.json'.format(item_type=item_type),
        'href': '{item_uri}#!add'.format(item_uri=item_uri),
    }
@contentbase.calculated_property(context=Item, category='action')
def edit(item_uri, item_type, has_permission):
    """Expose an 'edit' action when editing is permitted and forms are enabled."""
    if not (has_permission('edit') and contextless_has_permission('forms')):
        return None
    return {
        'name': 'edit',
        'title': 'Edit',
        'profile': '/profiles/{item_type}.json'.format(item_type=item_type),
        'href': item_uri + '#!edit',
    }
@contentbase.calculated_property(context=Item, category='action')
def edit_json(item_uri, item_type, has_permission):
    """Expose a raw-JSON edit action whenever editing is permitted."""
    if not has_permission('edit'):
        return None
    return {
        'name': 'edit-json',
        'title': 'Edit JSON',
        'profile': '/profiles/{item_type}.json'.format(item_type=item_type),
        'href': item_uri + '#!edit-json',
    }
| {
"repo_name": "philiptzou/clincoded",
"path": "src/clincoded/types/base.py",
"copies": "1",
"size": "7287",
"license": "mit",
"hash": 2508191901959448000,
"line_mean": 31.53125,
"line_max": 101,
"alpha_frac": 0.6013448607,
"autogenerated": false,
"ratio": 3.8072100313479624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9905257303422179,
"avg_score": 0.0006595177251565579,
"num_lines": 224
} |
from functools import lru_cache
from pyramid.security import (
ALL_PERMISSIONS,
Allow,
Authenticated,
Deny,
DENY_ALL,
Everyone,
)
from pyramid.traversal import (
find_root,
traverse,
)
import contentbase
from ..schema_formats import is_accession
@lru_cache()
def _award_viewing_group(award_uuid, root):
    # Cached lookup of an award's viewing_group.
    # NOTE(review): the cache keys on (award_uuid, root), so it retains the
    # root object for the process lifetime - confirm this is intended.
    award = root.get_by_uuid(award_uuid)
    return award.upgrade_properties().get('viewing_group')
# Anyone, including anonymous users, may view.
ALLOW_EVERYONE_VIEW = [
    (Allow, Everyone, 'view'),
]
# Members of the submitter group may add items.
ALLOW_SUBMITTER_ADD = [
    (Allow, 'group.submitter', 'add')
]
# Only members of the matching viewing group may view.
ALLOW_VIEWING_GROUP_VIEW = [
    (Allow, 'role.viewing_group_member', 'view'),
]
# Viewing group may view; admins and the owning lab's submitters may edit.
ALLOW_LAB_SUBMITTER_EDIT = [
    (Allow, 'role.viewing_group_member', 'view'),
    (Allow, 'group.admin', 'edit'),
    (Allow, 'role.lab_submitter', 'edit'),
]
# Anyone may view; admins may edit. (Here 'current' items are world-readable.)
ALLOW_CURRENT = [
    (Allow, Everyone, 'view'),
    (Allow, 'group.admin', 'edit'),
]
# Admin-only access; everyone else is denied by the trailing DENY_ALL.
ONLY_ADMIN_VIEW = [
    (Allow, 'group.admin', ALL_PERMISSIONS),
    (Allow, 'group.read-only-admin', ['view']),
    # Avoid schema validation errors during audit
    (Allow, 'remoteuser.EMBED', ['view', 'expand', 'audit', 'import_items']),
    (Allow, 'remoteuser.INDEXER', ['view', 'index']),
    DENY_ALL,
]
# Deleted items are hidden from edit listings and otherwise admin-only.
DELETED = [
    (Deny, Everyone, 'visible_for_edit')
] + ONLY_ADMIN_VIEW
def paths_filtered_by_status(request, paths, exclude=('deleted', 'replaced'), include=None):
    """Filter resource paths by their item status.

    When ``include`` is given, keep only paths whose status is in it;
    otherwise drop paths whose status is in ``exclude``.
    """
    def status_of(path):
        return traverse(request.root, path)['context'].__json__(request).get('status')
    if include is not None:
        return [path for path in paths if status_of(path) in include]
    return [path for path in paths if status_of(path) not in exclude]
class Collection(contentbase.Collection):
    """Collection that resolves members by name, accession or alias."""
    def __init__(self, *args, **kw):
        super(Item.Collection, self).__init__(*args, **kw)
        if hasattr(self, '__acl__'):
            return
        # XXX collections should be setup after all types are registered.
        # Don't access type_info.schema here as that precaches calculated schema too early.
        if 'lab' in self.type_info.factory.schema['properties']:
            self.__acl__ = ALLOW_SUBMITTER_ADD
    def get(self, name, default=None):
        """Look up a member by traversal name, then accession, then alias."""
        found = super(Collection, self).get(name, None)
        if found is not None:
            return found
        # Unique-key lookups that this name could plausibly match.
        keys = []
        if is_accession(name):
            keys.append('accession')
        if ':' in name:
            keys.append('alias')
        for key in keys:
            candidate = self.connection.get_by_unique_key(key, name)
            if candidate is None:
                continue
            # Only return items that actually belong to this collection.
            if candidate.collection is not self and candidate.__parent__ is not self:
                return default
            return candidate
        return default
class Item(contentbase.Item):
    """Base content item: maps workflow status to an ACL and derives local
    roles (lab submitter, viewing-group member) from item properties."""

    # Collection class used to contain items of this type.
    Collection = Collection

    # Maps an item's 'status' property to the ACL applied in __acl__.
    # Statuses absent from this table fall back to ALLOW_LAB_SUBMITTER_EDIT.
    STATUS_ACL = {
        # standard_status
        'released': ALLOW_CURRENT,
        'deleted': DELETED,
        'replaced': DELETED,
        # shared_status
        'current': ALLOW_CURRENT,
        'disabled': ONLY_ADMIN_VIEW,
        # file
        'obsolete': ONLY_ADMIN_VIEW,
        # antibody_characterization
        'compliant': ALLOW_CURRENT,
        'not compliant': ALLOW_CURRENT,
        'not reviewed': ALLOW_CURRENT,
        'not submitted for review by lab': ALLOW_CURRENT,
        # antibody_lot
        'eligible for new data': ALLOW_CURRENT,
        'not eligible for new data': ALLOW_CURRENT,
        'not pursued': ALLOW_CURRENT,
        # dataset / experiment
        'release ready': ALLOW_VIEWING_GROUP_VIEW,
        'revoked': ALLOW_CURRENT,
        # publication
        'published': ALLOW_CURRENT,
        # pipeline
        'active': ALLOW_CURRENT,
        'archived': ALLOW_CURRENT,
    }

    @property
    def __name__(self):
        """URL name for this item: the name_key property when available,
        falling back to the uuid (always the uuid for replaced items)."""
        if self.name_key is None:
            return self.uuid
        properties = self.upgrade_properties()
        if properties.get('status') == 'replaced':
            return self.uuid
        return properties.get(self.name_key, None) or self.uuid

    def __acl__(self):
        """Choose this item's ACL from its (upgraded) status property."""
        # Don't finalize to avoid validation here.
        properties = self.upgrade_properties().copy()
        status = properties.get('status')
        return self.STATUS_ACL.get(status, ALLOW_LAB_SUBMITTER_EDIT)

    def __ac_local_roles__(self):
        """Map principals to local roles based on the item's lab and award."""
        roles = {}
        properties = self.upgrade_properties().copy()
        if 'lab' in properties:
            # Members of the owning lab get the lab_submitter role.
            lab_submitters = 'submits_for.%s' % properties['lab']
            roles[lab_submitters] = 'role.lab_submitter'
        if 'award' in properties:
            # Members of the award's viewing group may see pre-release data.
            viewing_group = _award_viewing_group(properties['award'], find_root(self))
            if viewing_group is not None:
                viewing_group_members = 'viewing_group.%s' % viewing_group
                roles[viewing_group_members] = 'role.viewing_group_member'
        return roles

    def unique_keys(self, properties):
        """Extend base unique keys with accession handling.

        Alternate accessions always index under 'accession'; the primary
        accession only indexes while the item has not been replaced.
        """
        keys = super(Item, self).unique_keys(properties)
        if 'accession' not in self.schema['properties']:
            return keys
        keys.setdefault('accession', []).extend(properties.get('alternate_accessions', []))
        if properties.get('status') != 'replaced' and 'accession' in properties:
            keys['accession'].append(properties['accession'])
        return keys
class SharedItem(Item):
    """An Item visible to all authenticated users while "proposed" or "in progress"."""

    def __ac_local_roles__(self):
        """Grant the lab's submitters the submitter role, and make every
        authenticated user a viewing-group member."""
        item_properties = self.upgrade_properties().copy()
        local_roles = {}
        if 'lab' in item_properties:
            submitter_principal = 'submits_for.%s' % item_properties['lab']
            local_roles[submitter_principal] = 'role.lab_submitter'
        local_roles[Authenticated] = 'role.viewing_group_member'
        return local_roles
@contentbase.calculated_property(context=Item.Collection, category='action')
def add(context, request):
    """Expose the 'add' form action when the user may add to this
    collection and forms are enabled; otherwise contribute nothing."""
    if not request.has_permission('add'):
        return None
    if not request.has_permission('forms', request.root):
        return None
    return {
        'name': 'add',
        'title': 'Add',
        'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),
        'href': '{item_uri}#!add'.format(item_uri=request.resource_path(context)),
    }
@contentbase.calculated_property(context=Item, category='action')
def edit(context, request):
    """Expose the 'edit' form action when the user may edit this item
    and forms are enabled; otherwise contribute nothing."""
    if not request.has_permission('edit'):
        return None
    if not request.has_permission('forms', request.root):
        return None
    return {
        'name': 'edit',
        'title': 'Edit',
        'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),
        'href': '{item_uri}#!edit'.format(item_uri=request.resource_path(context)),
    }
@contentbase.calculated_property(context=Item, category='action')
def edit_json(context, request):
    """Expose the raw JSON edit action to users with edit permission."""
    if not request.has_permission('edit'):
        return None
    return {
        'name': 'edit-json',
        'title': 'Edit JSON',
        'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),
        'href': '{item_uri}#!edit-json'.format(item_uri=request.resource_path(context)),
    }
| {
"repo_name": "kidaa/encoded",
"path": "src/encoded/types/base.py",
"copies": "1",
"size": "7279",
"license": "mit",
"hash": 6828672249256526000,
"line_mean": 31.7882882883,
"line_max": 101,
"alpha_frac": 0.5992581399,
"autogenerated": false,
"ratio": 3.8169900367068696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9912489052682836,
"avg_score": 0.0007518247848067147,
"num_lines": 222
} |
from functools import lru_cache
from statistics import mean
from string import punctuation
import warnings
from nltk.corpus import cmudict
from nltk.tokenize import word_tokenize, sent_tokenize
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from otdet.util import lazyproperty
class ReadabilityMeasures:
    """Extract features based on readablility measures.

    Implements a scikit-learn-style transformer interface (fit /
    fit_transform / transform) producing one row of readability scores
    per document.
    """

    # CMU pronouncing dictionary, loaded once at class-definition time.
    # Requires the NLTK 'cmudict' corpus to be downloaded.
    d = cmudict.dict()
    # Sentinel returned when a measure's denominator is zero (empty text).
    INF = 10**9

    def __init__(self, lowercase=True, remove_punct=True, measures=None,
                 **kwargs):
        """Configure preprocessing and which measures to compute.

        measures: list of method names on this class; defaults to all seven.
        Extra **kwargs are accepted and ignored (sklearn-style tolerance).
        """
        self.lowercase = lowercase
        self.remove_punct = remove_punct
        if measures is None:
            self.measures = [
                'fleschease', 'fleschgrade', 'fogindex', 'colemanliau',
                'ari', 'lix', 'smog'
            ]
        else:
            self.measures = measures

    def fit(self, *args, **kwargs):
        # Do nothing: this transformer is stateless.
        pass

    def fit_transform(self, documents):
        # Directly transform (no fitting required).
        return self.transform(documents)

    def transform(self, documents):
        """Transform documents into vectors of readability measures."""
        if self.lowercase:
            contents = [doc.lower() for doc in documents]
        else:
            contents = documents
        tokcontents = [TokenizedContent(cont, self.remove_punct)
                       for cont in contents]
        return np.array([self._to_vector(tcont) for tcont in tokcontents])

    def _to_vector(self, tokenized_content):
        """Convert a tokenized content to a feature vector."""
        # Each configured measure name is looked up as a method on self.
        return np.array([getattr(self, m)(tokenized_content)
                         for m in self.measures])

    @classmethod
    def fleschease(cls, tokenized_content):
        """Return the Flesch-Kindaid Reading Ease measure."""
        nwords = tokenized_content.num_words
        nsents = tokenized_content.num_sents
        nsylls = cls.total_sylls(tokenized_content)
        try:
            return 206.835 - 1.015*(nwords/nsents) - 84.6*(nsylls/nwords)
        except ZeroDivisionError:
            # Empty document: no sentences/words to divide by.
            return cls.INF

    @classmethod
    def fleschgrade(cls, tokenized_content):
        """Return the Flesch-Kinaid Grade Level measure."""
        nwords = tokenized_content.num_words
        nsents = tokenized_content.num_sents
        nsylls = cls.total_sylls(tokenized_content)
        try:
            return 11.8*(nsylls/nwords) + 0.39*(nwords/nsents) - 15.59
        except ZeroDivisionError:
            return cls.INF

    @classmethod
    def fogindex(cls, tokenized_content):
        """Return the Gunning-Fog index."""
        nwords = tokenized_content.num_words
        nsents = tokenized_content.num_sents
        # Words with 3+ syllables ("complex" words).
        nwords3sylls = sum(sum(cls.num_syllables(w) >= 3 for w in s)
                           for s in tokenized_content)
        try:
            return (nwords/nsents) + (nwords3sylls/nwords)*100
        except ZeroDivisionError:
            return cls.INF

    @classmethod
    def colemanliau(cls, tokenized_content):
        """Return the Coleman-Liau formula."""
        nchars = tokenized_content.num_chars
        nwords = tokenized_content.num_words
        nsents = tokenized_content.num_sents
        try:
            # NOTE(review): the sentence term here is 0.3*(nsents/(nwords*100));
            # published Coleman-Liau variants use sentences *per 100 words*,
            # i.e. 0.3*(nsents/nwords)*100 — confirm which variant is intended.
            return 5.89*(nchars/nwords) - 0.3*(nsents/(nwords*100)) - 15.8
        except ZeroDivisionError:
            return cls.INF

    @classmethod
    def ari(cls, tokenized_content):
        """Return the Automated Readability Index."""
        nchars = tokenized_content.num_chars
        nwords = tokenized_content.num_words
        nsents = tokenized_content.num_sents
        try:
            return 4.71*(nchars/nwords) + 0.5*(nwords/nsents) - 21.43
        except ZeroDivisionError:
            return cls.INF

    @classmethod
    def lix(cls, tokenized_content):
        """Return the Lix formula."""
        nwords = tokenized_content.num_words
        # "Long" words: 6 characters or more.
        nwords6chars = sum(sum(len(w) >= 6 for w in s)
                           for s in tokenized_content)
        nsents = tokenized_content.num_sents
        try:
            return (nwords/nsents) + 100*(nwords6chars/nwords)
        except ZeroDivisionError:
            return cls.INF

    @classmethod
    def smog(cls, tokenized_content):
        """Return the SMOG index."""
        nwords3sylls = sum(sum(cls.num_syllables(w) >= 3 for w in s)
                           for s in tokenized_content)
        nsents = tokenized_content.num_sents
        try:
            return 3 + ((nwords3sylls*30)/nsents)**0.5
        except ZeroDivisionError:
            return cls.INF

    @staticmethod
    def total_sylls(tokenized_content):
        """Return the total number of syllables in a tokenized content."""
        return sum(sum(ReadabilityMeasures.num_syllables(w) for w in s)
                   for s in tokenized_content)

    @staticmethod
    @lru_cache(maxsize=None)
    def num_syllables(word):
        """Return the number of syllables in a word.

        Uses the CMU dictionary (averaging over alternative pronunciations,
        counting stressed phonemes); unknown words fall back to the average
        syllable count of known words of the same length.
        """
        if word in ReadabilityMeasures.d:
            # Vowel phonemes in cmudict end with a stress digit (0/1/2).
            res = mean(len([y for y in x if y[-1].isdigit()])
                       for x in ReadabilityMeasures.d[word])
        else:
            warnings.warn("No '{}' found in CMU corpus".format(word))
            res = ReadabilityMeasures.avg_syllables(len(word))
        return res

    @staticmethod
    @lru_cache(maxsize=None)
    def avg_syllables(wordlen):
        """Return the avg number of syllables of words with given length.

        Falls back to the global average when no dictionary word has the
        requested length. Cached: the full-dictionary scan is expensive.
        """
        res = [ReadabilityMeasures.num_syllables(w)
               for w in ReadabilityMeasures.d if len(w) == wordlen]
        if len(res) == 0:
            res = [ReadabilityMeasures.num_syllables(w)
                   for w in ReadabilityMeasures.d]
        return mean(res)
class TokenizedContent:
    """Class representing a tokenized content.

    Holds a list of sentences, each a list of word tokens, and exposes
    cached sentence/word/character counts for the readability measures.
    """

    def __init__(self, content, remove_punct=True):
        """Tokenize *content* into sentences of word tokens.

        Args:
            content: raw document text
            remove_punct: drop tokens that are single punctuation characters
        """
        self._tokcont = [word_tokenize(s) for s in sent_tokenize(content)]
        if remove_punct:
            # `w not in punctuation` only filters single-character tokens;
            # multi-character tokens such as '...' are kept.
            # (The previous version copied the list with [:] before each
            # comprehension; the comprehension already builds a new list,
            # so the copies were pure overhead and have been removed.)
            self._tokcont = [[w for w in s if w not in punctuation]
                            for s in self._tokcont]
        # Remove zero-length sentence
        self._tokcont = [s for s in self._tokcont if len(s) > 0]

    def __iter__(self):
        return iter(self._tokcont)

    @lazyproperty
    def num_sents(self):
        """Return the total number of sentences."""
        return len(self._tokcont)

    @lazyproperty
    def num_words(self):
        """Return the total number of words."""
        return sum(len(s) for s in self._tokcont)

    @lazyproperty
    def num_chars(self):
        """Return the total number of chars."""
        return sum(sum(len(w) for w in s) for s in self._tokcont)
class CountVectorizerWrapper(CountVectorizer):
    """Wrapper around CountVectorizer class in scikit-learn.

    Identical to CountVectorizer except that transform results are
    returned as dense arrays instead of sparse matrices.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def fit_transform(self, *args, **kwargs):
        """Wrapper around fit_transform() method in CountVectorizer."""
        sparse_counts = super().fit_transform(*args, **kwargs)
        return sparse_counts.toarray()

    def transform(self, *args, **kwargs):
        """Wrapper around transform() method in CountVectorizer."""
        sparse_counts = super().transform(*args, **kwargs)
        return sparse_counts.toarray()
| {
"repo_name": "kemskems/otdet",
"path": "otdet/feature_extraction.py",
"copies": "1",
"size": "7325",
"license": "mit",
"hash": 7621188799809032000,
"line_mean": 33.880952381,
"line_max": 78,
"alpha_frac": 0.606552901,
"autogenerated": false,
"ratio": 3.839098532494759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4945651433494759,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import Any, Container, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union
from urllib.parse import urlparse
import orjson
import requests
from django.conf import settings
from requests.adapters import ConnectionError, HTTPAdapter
from requests.models import PreparedRequest, Response
from requests.packages.urllib3.util.retry import Retry
from zerver.lib.queue import queue_json_publish
from zerver.models import Client, Realm, UserProfile
from zerver.tornado.event_queue import process_notification
from zerver.tornado.sharding import get_tornado_port, get_tornado_uri, notify_tornado_queue_name
class TornadoAdapter(HTTPAdapter):
    """requests transport adapter for Django -> Tornado calls.

    Adds automatic retries (including for POST) and replaces opaque
    connection failures with an actionable error message.
    """

    def __init__(self) -> None:
        # All of the POST requests we make to Tornado are safe to
        # retry; allow retries of them, which is not the default.
        # NOTE(review): Retry.DEFAULT_METHOD_WHITELIST and the
        # method_whitelist kwarg were removed in urllib3 2.x (renamed
        # allowed_methods) — confirm the pinned urllib3 still supports them.
        retry_methods = Retry.DEFAULT_METHOD_WHITELIST | set(["POST"])
        retry = Retry(total=3, backoff_factor=1, method_whitelist=retry_methods)
        super().__init__(max_retries=retry)

    def send(
        self,
        request: PreparedRequest,
        stream: bool = False,
        timeout: Union[None, float, Tuple[float, float], Tuple[float, None]] = 0.5,
        verify: Union[bool, str] = True,
        cert: Union[None, bytes, str, Container[Union[bytes, str]]] = None,
        proxies: Optional[Mapping[str, str]] = None,
    ) -> Response:
        """Send the request, retrying per __init__'s policy.

        Raises ConnectionError with log-file guidance when Tornado is
        unreachable, and raises for non-2xx responses.
        """
        # Don't talk to Tornado through proxies, which only allow
        # requests to external hosts.
        proxies = {}
        try:
            resp = super().send(
                request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies
            )
        except ConnectionError:
            # With sharded Tornado (one process per port), point the admin
            # at the per-port log file.
            parsed_url = urlparse(request.url)
            logfile = (
                f"tornado-{parsed_url.port}.log"
                if settings.TORNADO_PROCESSES > 1
                else "tornado.log"
            )
            raise ConnectionError(
                f"Django cannot connect to Tornado server ({request.url}); "
                f"check {settings.ERROR_FILE_LOG_PATH} and {logfile}"
            )
        # Surface HTTP error statuses as exceptions rather than returning them.
        resp.raise_for_status()
        return resp
@lru_cache(None)
def requests_client() -> requests.Session:
    """Return the process-wide requests Session used to talk to Tornado.

    Built once (memoized via lru_cache) with TornadoAdapter mounted for
    both http and https.
    """
    session = requests.Session()
    tornado_adapter = TornadoAdapter()
    session.mount("https://", tornado_adapter)
    session.mount("http://", tornado_adapter)
    return session
def request_event_queue(
    user_profile: UserProfile,
    user_client: Client,
    apply_markdown: bool,
    client_gravatar: bool,
    slim_presence: bool,
    queue_lifespan_secs: int,
    event_types: Optional[Iterable[str]] = None,
    all_public_streams: bool = False,
    narrow: Iterable[Sequence[str]] = [],
    bulk_message_deletion: bool = False,
) -> Optional[str]:
    """Ask Tornado to allocate a new event queue for this user.

    Returns the new queue id, or None when Tornado is not in use.
    """
    if not settings.USING_TORNADO:
        return None
    req = {
        "dont_block": "true",
        "client": "internal",
        "user_profile_id": user_profile.id,
        "user_client": user_client.name,
        "secret": settings.SHARED_SECRET,
        "lifespan_secs": queue_lifespan_secs,
        # Boolean/structured values are JSON-encoded for the form body.
        "apply_markdown": orjson.dumps(apply_markdown),
        "client_gravatar": orjson.dumps(client_gravatar),
        "slim_presence": orjson.dumps(slim_presence),
        "all_public_streams": orjson.dumps(all_public_streams),
        "narrow": orjson.dumps(narrow),
        "bulk_message_deletion": orjson.dumps(bulk_message_deletion),
    }
    if event_types is not None:
        req["event_types"] = orjson.dumps(event_types)
    tornado_uri = get_tornado_uri(user_profile.realm)
    resp = requests_client().post(tornado_uri + "/api/v1/events/internal", data=req)
    return resp.json()["queue_id"]
def get_user_events(
    user_profile: UserProfile, queue_id: str, last_event_id: int
) -> List[Dict[str, Any]]:
    """Fetch events newer than *last_event_id* from the user's event queue.

    Returns an empty list when Tornado is not in use.
    """
    if not settings.USING_TORNADO:
        return []
    post_data: Dict[str, Any] = dict(
        queue_id=queue_id,
        last_event_id=last_event_id,
        dont_block="true",
        user_profile_id=user_profile.id,
        secret=settings.SHARED_SECRET,
        client="internal",
    )
    tornado_uri = get_tornado_uri(user_profile.realm)
    response = requests_client().post(tornado_uri + "/api/v1/events/internal", data=post_data)
    return response.json()["events"]
def send_notification_http(realm: Realm, data: Mapping[str, Any]) -> None:
    """Deliver a notification, in-process or over HTTP to Tornado.

    When running inside Tornado (or without Tornado at all) the
    notification is processed directly; otherwise it is POSTed to the
    realm's Tornado server.
    """
    if settings.USING_TORNADO and not settings.RUNNING_INSIDE_TORNADO:
        tornado_uri = get_tornado_uri(realm)
        requests_client().post(
            tornado_uri + "/notify_tornado",
            data=dict(data=orjson.dumps(data), secret=settings.SHARED_SECRET),
        )
    else:
        process_notification(data)
# The core function for sending an event from Django to Tornado (which
# will then push it to web and mobile clients for the target users).
# By convention, send_event should only be called from
# zerver/lib/actions.py, which helps make it easy to find event
# generation code.
#
# Every call point should be covered by a test in `test_events.py`,
# with the schema verified in `zerver/lib/event_schema.py`.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html
def send_event(
    realm: Realm, event: Mapping[str, Any], users: Union[Iterable[int], Iterable[Mapping[str, Any]]]
) -> None:
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    queue_name = notify_tornado_queue_name(get_tornado_port(realm))
    payload = dict(event=event, users=list(users))
    queue_json_publish(
        queue_name,
        payload,
        lambda *args, **kwargs: send_notification_http(realm, *args, **kwargs),
    )
| {
"repo_name": "hackerkid/zulip",
"path": "zerver/tornado/django_api.py",
"copies": "2",
"size": "5727",
"license": "apache-2.0",
"hash": 5565695577941948000,
"line_mean": 35.7115384615,
"line_max": 100,
"alpha_frac": 0.6476340143,
"autogenerated": false,
"ratio": 3.723667100130039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005603618361539556,
"num_lines": 156
} |
from functools import lru_cache
from typing import Any, Container, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union
import orjson
import requests
from django.conf import settings
from requests.adapters import ConnectionError, HTTPAdapter
from requests.models import PreparedRequest, Response
from requests.packages.urllib3.util.retry import Retry
from zerver.lib.queue import queue_json_publish
from zerver.models import Client, Realm, UserProfile
from zerver.tornado.event_queue import process_notification
from zerver.tornado.sharding import get_tornado_port, get_tornado_uri, notify_tornado_queue_name
class TornadoAdapter(HTTPAdapter):
    """requests transport adapter for Django -> Tornado calls: retries
    transient failures and raises a clearer error when Tornado is down."""

    def __init__(self) -> None:
        # Retry failed requests up to 3 times with exponential backoff.
        retry = Retry(total=3, backoff_factor=1)
        super(TornadoAdapter, self).__init__(max_retries=retry)

    def send(
        self,
        request: PreparedRequest,
        stream: bool = False,
        timeout: Union[None, float, Tuple[float, float], Tuple[float, None]] = 0.5,
        verify: Union[bool, str] = True,
        cert: Union[None, bytes, str, Container[Union[bytes, str]]] = None,
        proxies: Optional[Mapping[str, str]] = None,
    ) -> Response:
        """Send the request, bypassing any proxy for localhost traffic.

        Raises ConnectionError with log-file guidance when Tornado is
        unreachable, and raises for non-2xx responses.
        """
        if not proxies:
            proxies = {}
        # Tornado runs locally, so any configured proxy must be bypassed
        # for localhost addresses.
        merged_proxies = {**proxies, "no_proxy": "localhost,127.0.0.1"}
        try:
            resp = super().send(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=merged_proxies)
        except ConnectionError:
            # Re-raise with actionable advice for the administrator.
            raise ConnectionError(
                f"Django cannot connect to Tornado server ({request.url}); "
                f"check {settings.ERROR_FILE_LOG_PATH} and tornado.log"
            )
        # Surface HTTP error statuses as exceptions rather than returning them.
        resp.raise_for_status()
        return resp
@lru_cache(None)
def requests_client() -> requests.Session:
    """Return the process-wide requests Session used to talk to Tornado.

    Built once (memoized via lru_cache) with TornadoAdapter mounted for
    both http and https.
    """
    session = requests.Session()
    tornado_adapter = TornadoAdapter()
    session.mount("https://", tornado_adapter)
    session.mount("http://", tornado_adapter)
    return session
def request_event_queue(user_profile: UserProfile, user_client: Client, apply_markdown: bool,
                        client_gravatar: bool, slim_presence: bool, queue_lifespan_secs: int,
                        event_types: Optional[Iterable[str]]=None,
                        all_public_streams: bool=False,
                        narrow: Iterable[Sequence[str]]=[],
                        bulk_message_deletion: bool=False) -> Optional[str]:
    """Ask Tornado to allocate a new event queue for this user.

    Returns the new queue id, or None when no Tornado server is configured.
    """
    if not settings.TORNADO_SERVER:
        return None
    payload = dict(
        dont_block='true',
        client='internal',
        user_profile_id=user_profile.id,
        user_client=user_client.name,
        secret=settings.SHARED_SECRET,
        lifespan_secs=queue_lifespan_secs,
        # Boolean/structured values are JSON-encoded for the form body.
        apply_markdown=orjson.dumps(apply_markdown),
        client_gravatar=orjson.dumps(client_gravatar),
        slim_presence=orjson.dumps(slim_presence),
        all_public_streams=orjson.dumps(all_public_streams),
        narrow=orjson.dumps(narrow),
        bulk_message_deletion=orjson.dumps(bulk_message_deletion),
    )
    if event_types is not None:
        payload['event_types'] = orjson.dumps(event_types)
    tornado_uri = get_tornado_uri(user_profile.realm)
    response = requests_client().post(tornado_uri + '/api/v1/events/internal', data=payload)
    return response.json()['queue_id']
def get_user_events(user_profile: UserProfile, queue_id: str, last_event_id: int) -> List[Dict[str, Any]]:
    """Fetch events newer than *last_event_id* from the user's event queue.

    Returns an empty list when no Tornado server is configured.
    """
    if not settings.TORNADO_SERVER:
        return []
    request_body: Dict[str, Any] = dict(
        queue_id=queue_id,
        last_event_id=last_event_id,
        dont_block='true',
        user_profile_id=user_profile.id,
        secret=settings.SHARED_SECRET,
        client='internal',
    )
    tornado_uri = get_tornado_uri(user_profile.realm)
    response = requests_client().post(tornado_uri + '/api/v1/events/internal', data=request_body)
    return response.json()['events']
def send_notification_http(realm: Realm, data: Mapping[str, Any]) -> None:
    """Deliver a notification, in-process or over HTTP to Tornado."""
    if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
        tornado_uri = get_tornado_uri(realm)
        requests_client().post(
            tornado_uri + "/notify_tornado",
            data=dict(data=orjson.dumps(data), secret=settings.SHARED_SECRET),
        )
    else:
        process_notification(data)
def send_event(realm: Realm, event: Mapping[str, Any],
               users: Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None:
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    queue_name = notify_tornado_queue_name(get_tornado_port(realm))
    payload = dict(event=event, users=list(users))
    queue_json_publish(queue_name,
                       payload,
                       lambda *args, **kwargs: send_notification_http(realm, *args, **kwargs))
| {
"repo_name": "brainwane/zulip",
"path": "zerver/tornado/django_api.py",
"copies": "1",
"size": "4896",
"license": "apache-2.0",
"hash": 7708094520352694000,
"line_mean": 39.131147541,
"line_max": 122,
"alpha_frac": 0.6266339869,
"autogenerated": false,
"ratio": 3.7865429234338746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9899693954431534,
"avg_score": 0.0026965911804678663,
"num_lines": 122
} |
from functools import lru_cache
from typing import Callable, Dict, List, Optional, Any, cast
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import tostring as et_str
import html5lib
import requests
import activesoup
_namespaces = ["http://www.w3.org/1999/xhtml"]
def _strip_namespace(etree: Element) -> Element:
if type(etree.tag) != type(_strip_namespace):
# For comments, the tag comes through as a function that, when invoked, returns the element.
for ns in _namespaces:
etree.tag = etree.tag.replace(f"{{{ns}}}", "")
for c in etree:
_strip_namespace(c)
return etree
class BoundTag(activesoup.Response):
    """A ``BoundTag`` represents a single node in an HTML document.
    When a new HTML page is opened by the :py:class:`activesoup.Driver`,
    the page is parsed, and a new ``BoundTag`` is created, which is a
    handle to the top-level ``<html>`` element.
    ``BoundTag`` provides convenient access to data in the page:
    Via field-style find operation (inspired by BeautifulSoup):
    >>> page = html_page('<html><body><a id="link">link-text</a></body></html>')
    >>> page.a.text()
    'link-text'
    Via dictionary-stype attribute lookup:
    >>> page.a["id"]
    'link'
    A ``BoundTag`` wraps an :py:class:`xml.etree.ElementTree.Element`,
    providing shortcuts for common operations. The underlying ``Element`` can
    be accessed via :py:meth:`etree <BoundTag.etree>`. When child elements are
    accessed via those helpers, they are also wrapped in a ``BoundTag`` object.
    Note: a ``BoundTag`` object is created internally by the
    :py:class:`activesoup.Driver` - you will generally not need to
    construct one directly.
    """

    def __init__(
        self,
        driver: "activesoup.Driver",
        raw_response: requests.Response,
        element: Element,
    ) -> None:
        super().__init__(raw_response, "text/html")
        # The Driver is kept so forms resolved from this page can submit
        # through it; _et is the wrapped etree Element for this node.
        self._driver = driver
        self._et = element

    # NOTE(review): lru_cache on instance methods keys on `self` and keeps
    # every BoundTag alive for the cache's lifetime (flake8-bugbear B019).
    # It also assumes the parsed tree is never mutated after creation —
    # confirm both are acceptable here.
    @lru_cache(maxsize=1024)
    def __getattr__(self, item: str) -> "BoundTag":
        # Field-style lookup: page.a -> first <a> anywhere under this node.
        e = self._find(f".//{item}")
        if e is not None:
            return e
        raise AttributeError(f"{type(self)} has no attribute {item}")

    @lru_cache(maxsize=1024)
    def __getitem__(self, attr: str) -> str:
        # Dictionary-style lookup of the element's HTML attributes.
        # Raises KeyError when the attribute is absent.
        return self._et.attrib[attr]

    def find_all(self, element_matcher: str) -> List["BoundTag"]:
        """Find all matching elements on the current page
        :param str element_matcher: match expression to be used.
        :rtype: List[BoundTag]
        The match expression is made relative (by prefixing with ``.//``) and
        then forwarded to :py:meth:`etree's findall <python.xml.etree.ElementTree.Element.findall>`
        on the parsed ``Element``.
        Note that the general power of :py:mod:`xml.etree`'s XPath support is available, so
        filter expressions work too:
        >>> page = html_page('<html><body><a class="uncool">first link</a><a class="cool">second link</a></body></html>')
        >>> links = page.find_all('a')
        >>> links[0].text()
        'first link'
        >>> links[1].text()
        'second link'
        >>> cool_links = page.find_all('a[@class="cool"]')
        >>> len(cool_links)
        1
        >>> cool_links[0].text()
        'second link'
        ``find_all`` is a shortcut for ``.etree().findall()`` with a relative path:
        .. code-block::
            # The following are equivalent:
            tag.find_all("a")
            tag.etree().findall(".//a")
        """
        # Each match is wrapped via the factory so <form> elements come
        # back as BoundForm instances.
        return [
            _get_bound_tag_factory(element_matcher)(self._driver, self._raw_response, e)
            for e in self._et.findall(f".//{element_matcher}")
        ]

    # NOTE(review): the annotation should be Optional[str] (default is None);
    # see B019 note above regarding lru_cache on methods.
    @lru_cache(maxsize=1024)
    def find(self, xpath: str = None, **kwargs) -> Optional["BoundTag"]:
        """Find a single element matching the provided xpath expression
        :param str xpath: xpath expression that will be forwarded to :py:meth:`etree's find <python.xml.etree.ElementTree.Element.find>`
        :param kwargs: Optional dictionary of attribute values. If present,
            ``activesoup`` will append attribute filters to the XPath expression
        :rtype: Optional[BoundTag]
        Note that unlike :py:meth:`find_all`, the path is not first made relative.
        >>> page = html_page('<html><body><input type="text" name="first" /><input type="checkbox" name="second" /></body></html>')
        >>> page.find(".//input", type="checkbox")["name"]
        'second'
        The simplest use-case, of returning the first matching item for a
        particular tag, can be done via the field-stype find shortcut:
        >>> first_input = page.input
        >>> first_input["name"]
        'first'
        ``find`` is a shortcut for ``.etree().find()``:
        .. code-block::
            # The following are equivalent except that the returned value is wrapped in a BoundTag
            page.find('input', type="checkbox")
            page.find('input[@type="checkbox"]')
            page.etree().find('input[@type="checkbox"]')
            # The following are equivalent except that the returned value is wrapped in a BoundTag
            page.find('.//input')
            page.input
        """
        return self._find(xpath, **kwargs)

    def text(self) -> Optional[str]:
        """Access the text content of an HTML node
        :rtype: Optional[str]
        >>> page = html_page('<html><body><p>Hello world</p></body></html>')
        >>> p = page.p
        >>> p.text()
        'Hello world'
        ``text`` is a shortcut fro ``.etree().text``:
        .. code-block::
            # The following are equivalent:
            p.text()
            p.etree().text
        """
        return self._et.text

    def html(self) -> bytes:
        """Render this element's HTML as bytes
        :rtype: bytes
        The output is generated from the parsed HTML structure, as interpretted by ``html5lib``.
        ``html5lib`` is how ``activesoup`` interprets pages in the same way as the browser would,
        and that might mean making some changes to the structure of the document - for example,
        if the original HTML contained errors.
        """
        return et_str(self._et)

    def attrs(self) -> Dict[str, str]:
        # Direct (mutable) view of the element's attribute dict.
        return self._et.attrib

    def etree(self) -> Element:
        """Access the wrapped :py:class:`etree.Element <xml.etree.ElementTree.Element>` object
        The other methods on this class class are generally shortcuts to
        functionality provided by the underlying ``Element`` - with the
        difference that where applicable they wrap the results in a new
        ``BoundTag``.
        :rtype: Element
        """
        return self._et

    def _find(self, xpath: str = None, **kwargs) -> Optional["BoundTag"]:
        # Shared implementation for find()/__getattr__: default to "any
        # descendant", append [@attr='value'] filters for kwargs, and wrap
        # the hit (if any) with the tag-appropriate BoundTag subclass.
        if xpath is None:
            xpath = ".//*"
        if kwargs:
            xpath += "".join(f"[@{k}='{v}']" for k, v in kwargs.items())
        e = self._et.find(xpath)
        if e is None:
            return None
        bound_tag = _get_bound_tag_factory(e.tag)(self._driver, self._raw_response, e)
        return bound_tag

    def __repr__(self) -> str:
        return f"BoundTag[{self._et.tag}]"

    def __str__(self) -> str:
        return f"{self._et.tag}"
class BoundForm(BoundTag):
    """A ``BoundForm`` is a specialisation of the ``BoundTag`` class, returned
    when the tag is a ``<form>`` element.
    ``BoundForm`` adds the ability to submit forms to the server.
    >>> d = activesoup.Driver()
    >>> page = d.get("https://github.com/jelford/activesoup/issues/new")
    >>> f = page.form
    >>> page = f.submit({"title": "Misleading examples", "body": "Examples appear to show interactions with GitHub.com but don't reflect GitHub's real page structure"})
    >>> page.url
    'https://github.com/jelford/activesoup/issues/1'
    """

    def submit(
        self, data: Dict, suppress_unspecified: bool = False
    ) -> "activesoup.Driver":
        """Submit the form to the server
        :param Dict data: The values that should be provided for the various
            fields in the submitted form. Keys should correspond to the form
            inputs' ``name`` attribute, and may be simple string values, or
            lists (in the case where a form input can take several values)
        :param bool suppress_unspecified: If False (the default), then
            ``activesoup`` will augment the ``data`` parameter to include the
            values of fields that are:
            - not specified in the ``data`` parameter
            - present with default values in the form as it was presented to
              us.
            The most common use-cases for this is to pick up fields with
            ``type="hidden"`` (commonly used for CSRF protection) or fields
            with ``type="checkbox"`` (commonly some default values are ticked).
        If the form has an ``action`` attribute specified, then the form will
        be submitted to that URL. If the form does not specify a ``method``,
        then ``POST`` will be used as a default.
        """
        # Destination: the form's action attribute, falling back to the URL
        # the page itself was fetched from.
        try:
            action = self._et.attrib["action"]
        except KeyError:
            action = cast(str, self._raw_response.request.url)
        # HTTP method: the form's method attribute, defaulting to POST.
        try:
            method = self._et.attrib["method"]
        except KeyError:
            method = "POST"
        to_submit: Dict[str, Any] = {}
        if not suppress_unspecified:
            # Seed the payload with the form's pre-filled input values so
            # hidden fields (e.g. CSRF tokens) and pre-checked boxes survive.
            for i in self.find_all("input"):
                # NOTE: `type` shadows the builtin within this loop.
                type = i.attrs().get("type", "text")
                if type in {"checkbox", "radio"}:
                    # Only checked toggles contribute a value.
                    should_take_value = i.attrs().get("checked") is not None
                else:
                    should_take_value = True
                if should_take_value:
                    try:
                        if type != "checkbox":
                            to_submit[i["name"]] = i["value"]
                        else:
                            # Checkboxes may repeat a name; collect into a list.
                            value = to_submit.get(i["name"])
                            if value is None:
                                value = []
                            value.append(i["value"])
                            to_submit[i["name"]] = value
                    except KeyError:
                        # Inputs missing name/value attributes are skipped.
                        pass
        # Caller-supplied values override any defaults gathered above.
        to_submit.update(data)
        req = requests.Request(method=method, url=action, data=to_submit)
        return self._driver._do(req)
# Factory signature used to construct the BoundTag (sub)class wrapping a
# parsed element: (driver, raw response, element) -> BoundTag.
_BoundTagFactory = Callable[["activesoup.Driver", requests.Response, Element], BoundTag]
def resolve(driver: "activesoup.Driver", response: requests.Response) -> BoundTag:
    """Parse an HTTP response's HTML body and wrap its root in a BoundTag."""
    root: Element = _strip_namespace(html5lib.parse(response.content))
    return BoundTag(driver, response, root)
def _get_bound_tag_factory(tagname: str) -> _BoundTagFactory:
    """Return the BoundTag subclass to instantiate for *tagname*.

    Forms get the submit-capable BoundForm; everything else a plain BoundTag.
    """
    if tagname == "form":
        return BoundForm
    return BoundTag
| {
"repo_name": "jelford/activesoup",
"path": "src/activesoup/html.py",
"copies": "1",
"size": "11008",
"license": "mit",
"hash": -2311447842219989000,
"line_mean": 34.7402597403,
"line_max": 168,
"alpha_frac": 0.5795784884,
"autogenerated": false,
"ratio": 4.157099697885196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01105240109918863,
"num_lines": 308
} |
from functools import lru_cache
from typing import Callable, Mapping, Sequence, List
from warnings import warn
import pandas as pd
class Gene:
    """Stores gene's identifier and description (multiton).

    At a time there can be only one gene with given identifier,
    i.e. after the first initialization, all subsequent attempts
    to initialize a gene with the same identifier will return
    exactly the same object. This is so called multiton pattern.

    Example:
        >>> x = Gene('TP53')
        >>> y = Gene('TP53')
        >>> assert x is y  # passes, there is only one gene
    """

    # Registry of every created gene, keyed by name (the multiton pool).
    instances = {}

    __slots__ = ('name', 'description', 'id')

    def __new__(cls, *args, **kwargs):
        if not args:
            # for pickling the requirements are lessened
            # ONLY for pickling
            return super(Gene, cls).__new__(cls)
        name = args[0]
        if name not in cls.instances:
            gene = super(Gene, cls).__new__(cls)
            gene.__init__(*args, **kwargs)
            # BUGFIX: ids are sequential starting at 0. Previously this was
            # `len(cls.instances) - 1`, evaluated before the insertion below,
            # which gave the very first gene an id of -1.
            gene.id = len(cls.instances)
            cls.instances[name] = gene
        return cls.instances[name]

    def __init__(self, name, description=None):
        # NOTE: Python re-runs __init__ on the instance returned by __new__,
        # so constructing an existing gene re-assigns these attributes
        # (description reverts to None unless passed again) — pre-existing
        # behavior, kept as-is.
        self.name = name
        self.description = description

    def __repr__(self):
        return f'<Gene: {self.name}>'
class Sample:
    """Sample contains expression values for genes."""

    def __init__(self, name, data: Mapping[Gene, float]):
        self.name = name
        self.data = data

    @property
    def genes(self):
        """View of the genes measured in this sample."""
        return self.data.keys()

    @classmethod
    def from_names(cls, name, data: Mapping[str, float]):
        """Create a sample from a gene_name: value mapping.

        Args:
            name: name of sample
            data: mapping (e.g. dict) where keys represent gene names
        """
        by_gene = {Gene(gene_name): expression
                   for gene_name, expression in data.items()}
        return cls(name, by_gene)

    @classmethod
    def from_array(cls, name, panda_series: pd.Series, descriptions=False):
        """Create a sample from pd.Series or equivalent.

        Args:
            name: name of the sample
            panda_series:
                series object where columns represent values of genes and
                names are either gene identifiers of tuples:
                ``(gene_identifier, description)``
            descriptions:
                are descriptions present in names of the series object?
        """
        if descriptions:
            def make_gene(label):
                return Gene(*label)
        else:
            make_gene = Gene
        series_values = panda_series.to_dict()
        return cls(name, {make_gene(label): value
                          for label, value in series_values.items()})

    def as_array(self):
        """
        Returns: one-dimensional labeled array with Gene objects as labels
        """
        return pd.Series(self.data)

    def __eq__(self, other):
        if self.name != other.name:
            return False
        return self.data == other.data

    def __repr__(self):
        return f'<Sample "{self.name}" with {len(self.data)} genes>'
def first_line(file_object, skip_rows=0):
    """Return the line following ``skip_rows`` skipped lines, then rewind.

    Returns the empty string if the file ends before enough lines are
    read (the ``readline`` EOF convention). The file position is always
    reset to the beginning before returning.
    """
    line = None
    while not (line and skip_rows < 0):
        line = file_object.readline()
        if not line:
            # Bug fix: at EOF readline() keeps returning '' forever, so the
            # original loop spun indefinitely on empty/short files.
            break
        skip_rows -= 1
    # return to the beginning
    file_object.seek(0)
    return line
# TODO class variable with set of genes + method(s) for checking data integrity
class SampleCollection:
    """A collection of samples of common origin or characteristic.
    An example sample_collection can be:
    (Breast_cancer_sample_1, Breast_cancer_sample_2) named "Breast cancer".
    The common origin/characteristics for "Breast cancer" sample_collection could be
    "a breast tumour", though samples had been collected from two donors.
    Another example are controls:
    (Control_sample_1, Control_sample_2) named "Control".
    The common characteristic for these samples is that both are controls.
    """
    def __init__(self, name: str, samples=None):
        self.samples: List[Sample] = samples or []
        self.name = name
        # integrity check
        # Raises AssertionError if there is inconsistency in genes in samples.
        # genes = self.samples[0].genes
        # assert all(sample.genes == genes for sample in self.samples[1:])
    @property
    def labels(self):
        # Names of all samples, in collection order.
        return [sample.name for sample in self.samples]
    @property
    @lru_cache(maxsize=1)
    def genes(self):
        """Return all genes present in the collection of samples.

        NOTE(review): ``lru_cache`` on a method keys on ``self``; with
        ``maxsize=1`` a second collection evicts the first's entry and the
        cache keeps the instance alive — confirm this is acceptable.
        """
        genes = self.samples[0].genes
        return genes
    @lru_cache(maxsize=None)
    def of_gene(self, gene):
        # Expression value of ``gene`` in every sample, in collection order.
        return tuple(
            sample.data[gene]
            for sample in self.samples
        )
    def as_array(self):
        """
        Returns: :class:`pandas.DataFrame` object with data for all samples.

        NOTE(review): the code returns a dict mapping sample name to a
        DataFrame, not a single DataFrame — confirm which is intended.
        """
        return {s.name: pd.DataFrame(s) for s in self.samples}
    def __add__(self, other):
        # The merged collection keeps the left-hand collection's name.
        return SampleCollection(self.name, self.samples + other.samples)
    @classmethod
    def from_file(
        cls, name, file_object,
        columns_selector: Callable[[Sequence[int]], Sequence[int]]=None,
        samples=None, delimiter: str='\t', index_col: int=0,
        use_header=True, reverse_selection=False, prefix=None,
        header_line=0, description_column=None
    ):
        """Create a sample_collection (collection of samples) from csv/tsv file.
        Args:
            name:
                a name of the sample_collection (or group of samples) which will
                identify it (like "Tumour_1" or "Control_in_20_degrees")
            file_object: a file (containing gene expression)
                of the following structure:
                - names of samples separated by a tab in the first row,
                - gene symbol/name followed by gene expression values
                  for every sample in remaining rows;
                an additional column "description" is allowed between genes
                column and sample columns, though it has to be explicitly
                declared with `description_column` argument.
            columns_selector:
                a function which will select (and return) a subset of
                provided column identifiers (do not use with `samples`)
            samples:
                a list of names of samples to extract from the file
                (do not use with `columns_selector`)
            reverse_selection:
                if you want to use all columns but the selected ones
                (or all samples but the selected) set this to True
            delimiter: the delimiter of the columns
            index_col: column to use as the gene names
            use_header: does the file have a header?
            prefix: prefix for custom samples naming schema
            header_line: number of non-empty line with sample names
                None - do not use, 0 - use first row
            description_column:
                is a column with description present in the file
                (on the second position, after gene identifiers)?
        """
        if file_object.tell() != 0:
            # NOTE(review): bare ``Exception`` — a ValueError carrying the
            # warned message would be easier for callers to handle.
            warn(f'Passed file object: {file_object} was read before.')
            raise Exception()
        line = first_line(file_object, header_line or 0)
        header_items = [item.strip() for item in line.split('\t')]
        gene_columns = [index_col]
        if description_column:
            # The description column is assumed to sit right after the gene
            # identifiers (position 1), regardless of the value passed in.
            description_column = 1
            gene_columns.append(description_column)
        else:
            if any('description' == name.lower() for name in header_items):
                warn(
                    'First line of your file contains "description" column, '
                    'but you did not provide "--description_column" argument.'
                )
        # a reasonable assumption is that the columns with samples
        # start after columns with gene symbol and gene description
        column_shift = max(gene_columns) + 1
        if columns_selector:
            # sniff how many columns do we have in the file
            columns_count = line.count(delimiter)
            all_sample_columns = list(range(column_shift, columns_count + 1))
            # generate identifiers (numbers) for all columns
            # and take the requested subset
            columns = columns_selector(all_sample_columns)
            if reverse_selection:
                columns = list(columns)
                columns = [c for c in all_sample_columns if c not in columns]
            # https://github.com/pandas-dev/pandas/issues/9098#issuecomment-333677100
            columns = gene_columns + list(columns)
        else:
            columns = None
        if not use_header:
            if samples:
                raise ValueError(
                    'To select samples by their name, you need a file with '
                    'samples names in the header. If you use such file, '
                    'please set `use_header=True`, otherwise skip `samples` '
                    'in your arguments.'
                )
            if header_line:
                warn(
                    '`header_line` has no effect when '
                    '`use_header` is set to `False`'
                )
        # we could leave it to pandas, but it shows an ugly,
        # not very helpful message. It is better to show the
        # user where exactly the problem occurs.
        if samples:
            available_samples = [
                name
                for name in header_items[column_shift:]
            ]
            lacking_samples = set(samples) - set(available_samples)
            if lacking_samples:
                raise ValueError(
                    f'Samples {lacking_samples} are not available in {file_object.name} file.\n'
                    f'Following samples were found: {", ".join(available_samples)}.'
                )
            if index_col:
                # TODO https://github.com/pandas-dev/pandas/issues/9098
                warn(
                    'Using "samples" with "index_col" 0 may cause an '
                    'unexpected behaviour due to an upstream issue in '
                    'pandas package (pandas-dev/pandas/issues/9098) '
                    'for pandas in versions older than 0.21.'
                )
            additional_column_names = [
                header_items[index]
                for index in gene_columns
            ]
            # https://github.com/pandas-dev/pandas/issues/9098#issuecomment-333677100
            samples = additional_column_names + list(samples)
        # just to reassure that the pointer is on the beginning
        if file_object.tell() != 0:
            warn('Passed file object was read before.')
        if samples and columns:
            warn(
                'Please, provide either columns or samples, '
                'not both. We will use columns this time.'
            )
        try:
            data = pd.read_table(
                file_object,
                delimiter=delimiter,
                # None - do not use, 0 - use first row
                header=header_line if use_header else None,
                index_col=gene_columns,
                usecols=columns or samples,
                prefix=f'{prefix}_' if prefix else ''
            )
        except Exception as e:
            from traceback import print_tb
            from traceback import print_stack
            # NOTE(review): ``print_tb`` expects a traceback object, not an
            # exception (use ``e.__traceback__``); also, swallowing the
            # exception leaves ``data`` unbound, so the code below raises
            # NameError anyway — consider re-raising instead.
            print_tb(e)
            print(e)
        descriptions = description_column is not None
        samples = [
            Sample.from_array(sample_name, sample_data, descriptions=descriptions)
            for sample_name, sample_data in data.items()
        ]
        return cls(name, samples)
    @classmethod
    def from_gct_file(cls, name, file_object, **kwargs):
        """Parse file in Gene Cluster Text file format, as defined on:
        software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats
        User is allowed to provide settings different from the standard.
        """
        version = file_object.readline()
        rows_count, samples_count = map(int, file_object.readline().split('\t'))
        default_values = {
            'description_column': True,
            'header_line': 2
        }
        if version != '#1.2\n':
            warn('Unsupported version of GCT file')
        file_object.seek(0)
        # NOTE(review): this overrides user-provided kwargs unconditionally;
        # ``kwargs.setdefault(key, value)`` may be what was intended.
        for key, value in default_values.items():
            kwargs[key] = value
        self = cls.from_file(
            name, file_object,
            **kwargs
        )
        # if user did not choose a subset of samples
        if not any(key in kwargs for key in ['samples', 'columns_selector']):
            # check if the samples numbers are ok
            if len(self.samples) != samples_count:
                warn(
                    f'Samples count ({len(self.samples)}) '
                    f'does not match with the {samples_count} '
                    f'declared in {name} file.'
                )
            if rows_count != len(self.samples[0].genes):
                warn(
                    f'Number of rows ({rows_count}) does not match '
                    f'with the {len(self.samples[0].genes)} '
                    f'declared in {name} file'
                )
        return self
    @classmethod
    def from_csv_file(cls, name, file_object, **kwargs):
        # Thin wrapper over from_file that defaults the delimiter to a comma
        # and warns when the caller overrides it with something else.
        if 'delimiter' in kwargs:
            if kwargs['delimiter'] != ',':
                warn(
                    'You are using not comma delimiter for what looks like csv file. '
                    'Is this really the thing you want to do?'
                )
        else:
            kwargs['delimiter'] = ','
        return cls.from_file(name, file_object, **kwargs)
# TODO class variable with set of genes + method(s) for checking data integrity
class Experiment:
    """Pairs a case sample collection with its control collection."""

    def __init__(self, case: 'SampleCollection', control: 'SampleCollection'):
        self.control = control
        self.case = case

    def get_all(self):
        """Return the control and case collections merged into one."""
        return self.control + self.case

    # TODO: are there many ways to compute fold-change?
    def get_fold_change(self, sample_from_case, use_log=False):
        """Compute fold change of ``sample_from_case`` against the control.

        Args:
            sample_from_case: a sample belonging to ``self.case``
            use_log: whether to return a log-transformed fold change
        """
        assert sample_from_case in self.case.samples
        # TODO: implement inline
        # Bug fix: the computed fold change used to be discarded.
        return calc_fold_change(sample_from_case, self.control, use_log=use_log)
"""
def fold_change(case, base, log2=False):
fold_changes = case.copy()
for (idx, row) in base.iterrows():
fold_changes.loc[[idx]] /= (np.mean(row) or 0.01) # TODO for now arbitrary value 0.01 when 0's are found
if log2:
fold_changes = np.log2(fold_changes) # TODO Runtime Warning when 0's are encountered
return fold_changes
"""
class Study:
    """A study: several case collections measured against one shared control."""

    def __init__(self, cases: 'Sequence[SampleCollection]', control: 'SampleCollection'):
        # Bug fix: the original loop rebound ``self.experiments`` on every
        # iteration, so only the experiment for the LAST case survived
        # (and no attribute was set at all for an empty ``cases``).
        self.experiments = [Experiment(case, control) for case in cases]
| {
"repo_name": "kn-bibs/pathways-analysis",
"path": "models.py",
"copies": "1",
"size": "15022",
"license": "mit",
"hash": -7627275604343066000,
"line_mean": 32.9864253394,
"line_max": 121,
"alpha_frac": 0.5673012914,
"autogenerated": false,
"ratio": 4.435193386477708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5502494677877708,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import Dict, List
from ._cell_widths import CELL_WIDTHS
from ._lru_cache import LRUCache
def cell_len(text: str, _cache: Dict[str, int] = LRUCache(1024 * 4)) -> int:
    """Get the number of cells required to display text.

    Args:
        text (str): Text to display.
    Returns:
        int: Number of cells required to display the text.
    """
    hit = _cache.get(text, None)
    if hit is not None:
        return hit
    width = sum(get_character_cell_size(ch) for ch in text)
    # Only short strings are cached, so a few huge strings cannot
    # evict everything else from the shared LRU cache.
    if len(text) <= 64:
        _cache[text] = width
    return width
@lru_cache(maxsize=4096)
def get_character_cell_size(character: str) -> int:
    """Get the cell size of a character.

    Args:
        character (str): A single character.
    Returns:
        int: Number of cells (0, 1 or 2) occupied by that character.
    """
    codepoint = ord(character)
    if 31 < codepoint < 127:
        # Printable ASCII is always exactly one cell wide.
        return 1
    return _get_codepoint_cell_size(codepoint)
@lru_cache(maxsize=4096)
def _get_codepoint_cell_size(codepoint: int) -> int:
    """Get the cell size of a codepoint by binary search of CELL_WIDTHS.

    Args:
        codepoint (int): Unicode codepoint.
    Returns:
        int: Number of cells (0, 1 or 2) occupied by that codepoint.
    """
    table = CELL_WIDTHS
    lo = 0
    hi = len(table) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        start, end, width = table[mid]
        if codepoint < start:
            hi = mid - 1
        elif codepoint > end:
            lo = mid + 1
        else:
            # A width of -1 marks zero-width entries in the table.
            return 0 if width == -1 else width
    # Codepoint not covered by any range: assume a single cell.
    return 1
def set_cell_size(text: str, total: int) -> str:
    """Set the length of a string to fit within given number of cells."""
    size = cell_len(text)
    if size == total:
        return text
    if size < total:
        # Too narrow: pad with spaces up to the requested width.
        return text + " " * (total - size)
    # Too wide: drop characters from the end until the text fits.
    widths = [get_character_cell_size(ch) for ch in text]
    overflow = size - total
    while overflow > 0 and widths:
        overflow -= widths.pop()
    clipped = text[: len(widths)]
    if overflow == -1:
        # Removed a double-width character one cell too many; pad back.
        clipped += " "
    return clipped
def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
    """Break text in to equal (cell) length strings."""
    lines: List[List[str]] = [[]]
    # ``position`` pre-fills the first line with already-consumed cells.
    used = position
    for character in text:
        size = get_character_cell_size(character)
        if used + size > max_size:
            # Start a new line with the character that overflowed.
            lines.append([character])
            used = size
        else:
            used += size
            lines[-1].append(character)
    return ["".join(line) for line in lines]
if __name__ == "__main__":  # pragma: no cover
    # Manual smoke test: emoji cell width, CJK line chopping, and
    # progressive truncation/padding with set_cell_size.
    print(get_character_cell_size("😽"))
    for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8):
        print(line)
    for n in range(80, 1, -1):
        print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|")
        print("x" * n)
| {
"repo_name": "willmcgugan/rich",
"path": "rich/cells.py",
"copies": "1",
"size": "3685",
"license": "mit",
"hash": -2591204797837050400,
"line_mean": 27.464,
"line_max": 81,
"alpha_frac": 0.5947161327,
"autogenerated": false,
"ratio": 3.331460674157303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9425689001979254,
"avg_score": 0.0000975609756097561,
"num_lines": 125
} |
from functools import lru_cache
from typing import Dict, List, Optional
from debug_toolbar.panels.templates import TemplatesPanel as BaseTemplatesPanel
from django.conf import settings
from django.contrib import messages
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.utils.html import strip_spaces_between_tags
from django.utils.translation import (
get_language_from_request, get_language_info)
from jinja2 import nodes
from jinja2.ext import Extension
from rjsmin import jsmin
ForumAuthForm = None
class JsMinExtension(Extension):
    """
    Removes whitespace between JavaScript tags, including tab and
    newline characters.
    """

    tags = {'jsmin'}

    def parse(self, parser):
        # Consume the opening {% jsmin %} token and remember its line number.
        lineno = next(parser.stream).lineno
        stmts = parser.parse_statements(['name:endjsmin'], drop_needle=True)
        block = nodes.CallBlock(
            self.call_method('_strip_whitespace_js', [], [], None, None),
            [], [], stmts,
        )
        return block.set_lineno(lineno)

    def _strip_whitespace_js(self, caller=None):
        rendered = caller().strip()
        if settings.DEBUG:
            # Debug mode, no minification
            return rendered
        return jsmin(rendered)
# https://github.com/coffin/coffin/blob/master/coffin/common.py
class SpacelessExtension(Extension):
    """
    Removes whitespace between HTML tags, including tab and
    newline characters.
    Works exactly like Django's own tag.
    """

    tags = {'spaceless'}

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        stmts = parser.parse_statements(['name:endspaceless'], drop_needle=True)
        block = nodes.CallBlock(
            self.call_method('_strip_spaces', [], [], None, None),
            [], [], stmts,
        )
        return block.set_lineno(lineno)

    def _strip_spaces(self, caller=None):
        # Delegate to Django's own whitespace stripper.
        return strip_spaces_between_tags(caller().strip())
class CommentExtension(Extension):
    """
    Skips the content within the comment/endcomment tag.
    """

    tags = {'comment'}

    def parse(self, parser):
        # Advance past the opening tag, then parse and discard the body.
        next(parser.stream)
        parser.parse_statements(['name:endcomment'], drop_needle=True)
        return []
class MyLanguageInfoExtension(Extension):
    """
    An assignment tag for Jinja, setting a language info dictionary.
    Samples hacked together from:
    http://stackoverflow.com/a/23170408/1067833
    https://github.com/coffin/coffin/blob/master/coffin/static.py
    """

    tags = {'get_my_language_info'}

    def parse(self, parser):
        stream = parser.stream
        lineno = next(stream).lineno
        ctx_ref = nodes.ContextReference()
        call_node = self.call_method(
            '_get_current_language_info', [ctx_ref], lineno=lineno)
        # {% get_my_language_info as var %} stores it; otherwise output it.
        if not stream.next_if('name:as'):
            return nodes.Output([call_node]).set_lineno(lineno)
        target = nodes.Name(stream.expect('name').value, 'store')
        return nodes.Assign(target, call_node).set_lineno(lineno)

    def _get_current_language_info(self, context):
        lang_code = get_language_from_request(request=context['request'])
        return get_language_info(lang_code=lang_code)
class TemplatesPanel(BaseTemplatesPanel):
    """
    A fix for django debug toolbar.
    http://stackoverflow.com/a/39036820/1067833
    """

    def generate_stats(self, *args):
        if not self.templates:
            return
        # Some template backends expose ``backend`` where the panel expects
        # ``engine`` — alias it before delegating to the base implementation.
        first = self.templates[0]['template']
        if hasattr(first, 'backend') and not hasattr(first, 'engine'):
            first.engine = first.backend
        return super().generate_stats(*args)
@lru_cache(maxsize=20)
def paginator_generic_get_list(
    current_no: int, num_pages: int,
    adjacent_pages: int = settings.PAGINATOR_DEFAULT_ADJACENT_PAGES
) -> Optional[List[Dict]]:
    """Generate a paginator list with ellipsis."""
    items = []
    first_shown = max(current_no - adjacent_pages, 1)
    if first_shown <= 3:
        first_shown = 1
    else:
        # Gap between page 1 and the window around the current page.
        items.append({'number': 1, 'type': 'number'})
        items.append({'type': 'ellipsis'})
    past_last_shown = current_no + adjacent_pages + 1
    needs_tail_ellipsis = past_last_shown < num_pages - 1
    if not needs_tail_ellipsis:
        # The window reaches (almost) the end: show every remaining page.
        past_last_shown = num_pages + 1
    items.extend(
        {'number': page, 'type': 'number'}
        for page in range(first_shown, past_last_shown))
    if needs_tail_ellipsis:
        items.append({'type': 'ellipsis'})
        items.append({'number': num_pages, 'type': 'number'})
    return items
def forum_auth_form():
    """Return the ``ForumAuthForm`` class, importing it lazily.

    The deferred import binds the module-level global (via the ``global``
    statement), presumably to avoid an import cycle at module load time.
    """
    global ForumAuthForm
    if ForumAuthForm is None:
        from forum.account.forms import ForumAuthForm
    return ForumAuthForm
class ForumToolsExtension(Extension):
    """
    Puts the settings variable and other utilities into the global
    template context.
    """

    def __init__(self, environment):
        super().__init__(environment)
        env_globals = environment.globals
        env_globals['django_settings'] = settings
        env_globals['forum_auth_form'] = forum_auth_form()
        env_globals['paginator_generic_get_list'] = paginator_generic_get_list
        env_globals['messages'] = messages
        environment.filters['naturaltime'] = naturaltime
| {
"repo_name": "karolyi/forum-django",
"path": "backend/forum/jinja2.py",
"copies": "1",
"size": "5219",
"license": "mit",
"hash": 1198952770549557800,
"line_mean": 29.8816568047,
"line_max": 79,
"alpha_frac": 0.6516574056,
"autogenerated": false,
"ratio": 3.9093632958801496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5061020701480149,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import Iterable, Dict, Set, Tuple
from helpers.hashable_dict import HashableDict
from interfaces.expr import Signal
class Label(HashableDict):  # TODO: use Expr instead (cube)
    """A _hashable_ dict mapping signal -> True/False (a cube).

    ``Label({})`` means 'True' (no constraints);
    and there is no need to represent 'False'.
    """
    pass
LABEL_TRUE = Label(dict())
class Node:
    """An automaton node with labelled, possibly non-deterministic transitions."""

    def __init__(self, name: str):
        self.name = name  # type: str
        # label -> {(dst_node, is_final), (dst_node, is_final), ...}
        self.transitions = {}  # type: Dict[Label, Set[Tuple['Node', bool]]]

    def add_transition(self,
                       label: Dict[Signal, bool],
                       dst_isFin_pairs: Iterable[Tuple['Node', bool]]):
        """ Several calls with the same `label` is allowed -
        this means that transition is 'non-deterministic' or 'universal'.
        But the same destination should never appear twice in both calls (that is just strange).
        :param: dst_isFin_pairs is of the form {(node, is_final), (node, is_final), ...}
        """
        label = Label(label)
        known_pairs = self.transitions.setdefault(label, set())
        assert not set(dst_isFin_pairs).intersection(known_pairs), \
            '\n'.join(map(str, [self.name,
                                label,
                                dst_isFin_pairs,
                                known_pairs]))
        known_pairs.update(dst_isFin_pairs)

    def __str__(self):
        per_label = ['[{0}: {1}]'.format(str(lbl), str(pairs))
                     for lbl, pairs in self.transitions.items()]
        return "{0}, transitions: {1}".format(self.name, ' '.join(per_label))

    def __lt__(self, other):
        # Arbitrary but consistent ordering (by object identity).
        return id(self) < id(other)

    def __repr__(self):
        return "{0}".format(self.name)
class Automaton:
    """A set of :class:`Node` objects with a single initial node."""

    def __init__(self,
                 init_nodes: 'Iterable[Node]',
                 nodes: 'Iterable[Node]',
                 name=''):
        self.init_nodes = set(init_nodes)  # type: Set[Node]
        assert len(self.init_nodes) == 1, 'TODO: change signature'
        self.nodes = set(nodes)  # type: Set[Node]
        self.name = name  # type: str

    def __str__(self):
        # Bug fix: without parentheses, ``a or b + c`` parses as
        # ``a or (b + c)``, so a *named* automaton printed only its name
        # and dropped the node listing entirely.
        return (self.name or 'Unnamed') + \
               "\nnodes:\n" + \
               "\n".join([str(x) for x in self.nodes]) + \
               "\n init nodes:\n" + \
               "\n".join([str(x) for x in self.init_nodes])
    __repr__ = __str__
# ------------------------------------------------------------------------
def is_satisfied(label, signal_values):
    """
    Do signal values satisfy the label?
    A label constrains only the signals it mentions; any signal absent
    from either side is unconstrained.
    >>> is_satisfied({'r':True}, dict())
    True
    >>> is_satisfied(dict(), {'r':True})
    True
    >>> is_satisfied({'r':True}, {'r':True, 'g':False})
    True
    >>> is_satisfied({'r':True, 'g':False}, {'g':False})
    True
    >>> is_satisfied({'g':False}, {'r':True})
    True
    """
    return all(label[signal] == value
               for signal, value in signal_values.items()
               if signal in label)
@lru_cache()
def get_next_states(n: Node, i_o: Label) -> Set[Node]:
    """Return every destination node reachable from ``n`` under valuation ``i_o``."""
    successors = set()
    for lbl, node_flag_pairs in n.transitions.items():
        if is_satisfied(lbl, i_o):
            # Keep only the destination node from each (node, is_final) pair.
            successors.update(pair[0] for pair in node_flag_pairs)
    return successors
| {
"repo_name": "5nizza/party-elli",
"path": "interfaces/automaton.py",
"copies": "1",
"size": "3598",
"license": "mit",
"hash": -1244348676524788000,
"line_mean": 30.5614035088,
"line_max": 100,
"alpha_frac": 0.5275152863,
"autogenerated": false,
"ratio": 3.7169421487603307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.474445743506033,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import List
import librosa
import numpy as np
from mugen import paths
from mugen.events import EventList, Event
class AudioEvent(Event):
    """
    An event in some audio (base class for all audio-derived events).
    """
    pass
class End(AudioEvent):
    """
    The end of some audio.
    """
    pass
class Beat(AudioEvent):
    """
    A beat in some audio.
    """
    pass
class WeakBeat(Beat):
    """
    A weak (leading/trailing) beat in some audio.
    """
    pass
class Onset(AudioEvent):
    """
    An onset in some audio.
    """
    pass
class Audio:
    """
    Wraps the audio output from librosa, providing access to extra features

    Attributes
    ----------
    file
        Loaded audio file
    samples
        Audio samples
    sample_rate
        Audio sample rate
    duration
        Audio duration (seconds)
    """
    file: str
    sample_rate: int
    samples: np.ndarray
    duration: float

    def __init__(self, file: str, *, sample_rate: int = 44100):
        """
        Parameters
        ----------
        file
            Audio file to load
        """
        self.file = file
        self.samples, self.sample_rate = librosa.load(file, sr=sample_rate)
        self.duration = librosa.get_duration(y=self.samples, sr=self.sample_rate)

    def __repr__(self):
        # Bug fix: ``filename`` was computed but never interpolated — the
        # f-string contained a literal placeholder instead of the value.
        filename = paths.filename_from_path(self.file)
        return f'<Audio, file: {filename}, duration: {self.duration}>'

    def beats(self, trim: bool = False) -> EventList:
        """
        Gets beat events

        Parameters
        ----------
        trim
            Label weak leading and trailing beats separately
        Returns
        -------
        Detected beat events from the audio
        """
        untrimmed_beats = self._beats()
        untrimmed_beats = EventList([Beat(beat) for beat in untrimmed_beats], end=self.duration)
        if not trim:
            beats = untrimmed_beats
        else:
            trimmed_beats = self._beats(trim=True)
            trimmed_leading_beats = [beat for beat in untrimmed_beats.locations if beat < trimmed_beats[0]]
            trimmed_trailing_beats = [beat for beat in untrimmed_beats.locations if beat > trimmed_beats[-1]]
            # Mark leading & trailing trimmed beats as weak beats
            trimmed_beats = EventList([Beat(beat) for beat in trimmed_beats], end=self.duration)
            trimmed_leading_beats = EventList([WeakBeat(beat) for beat in trimmed_leading_beats], end=self.duration)
            trimmed_trailing_beats = EventList([WeakBeat(beat) for beat in trimmed_trailing_beats], end=self.duration)
            beats = trimmed_leading_beats + trimmed_beats + trimmed_trailing_beats
        return beats

    @lru_cache(maxsize=None)
    def _beats(self, trim: bool = False) -> List[float]:
        """
        Gets beat locations using librosa's beat tracker

        Parameters
        ----------
        trim
            Whether to discard weak beats
        Returns
        -------
        Beat locations
        """
        # NOTE: lru_cache on a method keeps ``self`` alive for the cache's
        # lifetime — acceptable here, as Audio objects are long-lived.
        tempo, beats = librosa.beat.beat_track(y=self.samples, sr=self.sample_rate,
                                               units='time', trim=trim)
        return beats

    def onsets(self, backtrack: bool = False) -> EventList:
        """
        Gets onset events

        Parameters
        ----------
        backtrack
            Shift onset events back to the nearest local minimum of energy
        Returns
        -------
        Detected onset events from the audio
        """
        if not backtrack:
            onsets = self._onsets()
        else:
            onsets = self._onsets(backtrack=True)
        onsets = EventList([Onset(onset) for onset in onsets], end=self.duration)
        return onsets

    @lru_cache(maxsize=None)
    def _onsets(self, backtrack: bool = False):
        """
        Gets onset locations using librosa's onset detector.

        Parameters
        ----------
        backtrack
            Whether to shift onset events back to the nearest local minimum of energy
        Returns
        -------
        Onset locations
        """
        # Forward the flag directly instead of duplicating the call per branch.
        onsets = librosa.beat.onset.onset_detect(y=self.samples, sr=self.sample_rate,
                                                 units='time', backtrack=backtrack)
        return onsets
| {
"repo_name": "scherroman/mugen",
"path": "mugen/audio/Audio.py",
"copies": "1",
"size": "4727",
"license": "mit",
"hash": -3696941281759082000,
"line_mean": 25.1160220994,
"line_max": 120,
"alpha_frac": 0.5553204993,
"autogenerated": false,
"ratio": 4.114012184508268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5169332683808268,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import List, Tuple, Iterable, Dict, Optional
import numpy as np
from opensfm import bow
from opensfm import feature_loader
from opensfm.dataset import DataSetBase
def unnormalized_vlad(features: np.ndarray, centers: np.ndarray) -> np.ndarray:
    """Compute unnormalized VLAD histograms from a set of
    features in relation to centers.

    Returns the unnormalized VLAD vector (flattened residual sums).
    """
    histogram = np.zeros(centers.shape, dtype=np.float32)
    for feature in features:
        # Accumulate the residual against the nearest center.
        nearest = np.argmin(np.linalg.norm(feature - centers, axis=1))
        histogram[nearest, :] += feature - centers[nearest]
    return histogram.flatten()
def signed_square_root_normalize(v: np.ndarray) -> np.ndarray:
    """Compute Signed Square Root (SSR) normalization on
    a vector.

    Returns the SSR normalized (L2 unit-length) vector.
    """
    ssr = np.sign(v) * np.sqrt(np.abs(v))
    return ssr / np.linalg.norm(ssr)
def vlad_distances(
    image: str, other_images: Iterable[str], histograms: Dict[str, np.ndarray]
) -> Tuple[str, List[float], List[str]]:
    """Compute VLAD-based distance (L2 on VLAD-histogram)
    between an image and other images.

    Returns the image, the order of the other images,
    and the other images. Images without a histogram are skipped.
    """
    if image not in histograms:
        return image, [], []
    reference = histograms[image]
    distances = []
    compared = []
    for candidate in other_images:
        if candidate == image or candidate not in histograms:
            continue
        distances.append(np.linalg.norm(reference - histograms[candidate]))
        compared.append(candidate)
    return image, distances, compared
class VladCache(object):
    """Caches the VLAD vocabulary and per-image VLAD descriptors.

    NOTE(review): ``lru_cache`` on instance methods keys on ``self`` and
    keeps the instance alive; fine here since one module-level ``instance``
    is used — confirm if more instances are ever created.
    """
    @lru_cache(1)
    def load_words(self, data: DataSetBase) -> np.ndarray:
        # The vocabulary (cluster centers) is dataset-wide; cache one copy.
        words, _ = bow.load_vlad_words_and_frequencies(data.config)
        return words
    @lru_cache(1000)
    def vlad_histogram(self, data: DataSetBase, image: str) -> Optional[np.ndarray]:
        """Return the SSR-normalized VLAD descriptor of ``image``,
        or None when no (masked) features or descriptors are available."""
        words = self.load_words(data)
        features_data = feature_loader.instance.load_all_data(data, image, masked=True)
        if features_data is None:
            return None
        descriptors = features_data.descriptors
        if descriptors is None:
            return None
        vlad = unnormalized_vlad(descriptors, words)
        vlad = signed_square_root_normalize(vlad)
        return vlad
instance = VladCache()
| {
"repo_name": "mapillary/OpenSfM",
"path": "opensfm/vlad.py",
"copies": "1",
"size": "2311",
"license": "bsd-2-clause",
"hash": 7029261816028634000,
"line_mean": 29.012987013,
"line_max": 87,
"alpha_frac": 0.649935093,
"autogenerated": false,
"ratio": 3.5228658536585367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46728009466585363,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import Optional
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models import QuerySet
from rest_framework.compat import coreapi, coreschema
from rest_framework.filters import BaseFilterBackend
from rest_framework.request import Request
from rest_framework.viewsets import GenericViewSet
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
class FlexFieldsFilterBackend(BaseFilterBackend):
    """Filter backend that narrows the queryset to the fields a FlexFields
    serializer will actually render (``only``) and eagerly loads expanded
    relations (``select_related``/``prefetch_related``)."""
    def filter_queryset(
        self, request: Request, queryset: QuerySet, view: GenericViewSet
    ):
        # Only GET requests served by FlexFields serializers are optimized.
        if (
            not issubclass(view.get_serializer_class(), FlexFieldsSerializerMixin)
            or request.method != "GET"
        ):
            return queryset
        auto_remove_fields_from_query = getattr(
            view, "auto_remove_fields_from_query", True
        )
        auto_select_related_on_query = getattr(
            view, "auto_select_related_on_query", True
        )
        required_query_fields = list(getattr(view, "required_query_fields", []))
        serializer = view.get_serializer(  # type: FlexFieldsSerializerMixin
            context=view.get_serializer_context()
        )
        serializer.apply_flex_fields()
        # Model fields backing every serializer field (non-model sources
        # resolve to None and are filtered out by the condition).
        model_fields = [
            self._get_field(field.source, queryset.model)
            for field in serializer.fields.values()
            if self._get_field(field.source, queryset.model)
        ]
        # Subset of the above that corresponds to expanded (nested) fields.
        nested_model_fields = [
            self._get_field(field.source, queryset.model)
            for field in serializer.fields.values()
            if self._get_field(field.source, queryset.model)
            and field.field_name in serializer.expanded_fields
        ]
        if auto_remove_fields_from_query:
            # Restrict the SELECT to concrete columns (plus FK ids).
            queryset = queryset.only(
                *(
                    required_query_fields
                    + [
                        model_field.name
                        for model_field in model_fields
                        if not model_field.is_relation or model_field.many_to_one
                    ]
                )
            )
        if auto_select_related_on_query and nested_model_fields:
            # Forward relations can be JOINed; reverse/many relations
            # need a separate prefetch query.
            queryset = queryset.select_related(
                *(
                    model_field.name
                    for model_field in nested_model_fields
                    if model_field.is_relation and model_field.many_to_one
                )
            )
            queryset = queryset.prefetch_related(
                *(
                    model_field.name
                    for model_field in nested_model_fields
                    if model_field.is_relation and not model_field.many_to_one
                )
            )
        return queryset
    @staticmethod
    @lru_cache()
    def _get_field(field_name: str, model: models.Model) -> Optional[models.Field]:
        """Resolve ``field_name`` on ``model``, or None if it doesn't exist.

        NOTE(review): the annotation says instance, but ``model`` receives
        the model *class* (``queryset.model``); the unbounded cache grows
        with the number of distinct (field, model) pairs.
        """
        try:
            # noinspection PyProtectedMember
            return model._meta.get_field(field_name)
        except FieldDoesNotExist:
            return None
    def get_schema_fields(self, view):
        # Legacy coreapi schema support (DRF < 3.10 style).
        assert (
            coreapi is not None
        ), "coreapi must be installed to use `get_schema_fields()`"
        assert (
            coreschema is not None
        ), "coreschema must be installed to use `get_schema_fields()`"
        if not issubclass(view.get_serializer_class(), FlexFieldsSerializerMixin):
            return []
        return [
            coreapi.Field(
                name="fields",
                required=False,
                location="query",
                schema=coreschema.String(
                    title="Selected fields",
                    description="Specify required field by comma",
                ),
                example="field1,field2,nested.field",
            ),
            coreapi.Field(
                name="omit",
                required=False,
                location="query",
                schema=coreschema.String(
                    title="Omitted fields",
                    description="Specify required field by comma",
                ),
                example="field1,field2,nested.field",
            ),
            coreapi.Field(
                name="expand",
                required=False,
                location="query",
                schema=coreschema.String(
                    title="Expanded fields",
                    description="Specify required nested items by comma",
                ),
                example="nested1,nested2",
            ),
        ]
    def get_schema_operation_parameters(self, view):
        # OpenAPI schema support (DRF >= 3.10 style).
        if not issubclass(view.get_serializer_class(), FlexFieldsSerializerMixin):
            return []
        parameters = [
            {
                "name": "fields",
                "required": False,
                "in": "query",
                "description": "Specify required field by comma",
                "schema": {
                    "title": "Selected fields",
                    "type": "string",
                },
                "example": "field1,field2,nested.field",
            },
            {
                "name": "omit",
                "required": False,
                "in": "query",
                "description": "Specify required field by comma",
                "schema": {
                    "title": "Omitted fields",
                    "type": "string",
                },
                "example": "field1,field2,nested.field",
            },
            {
                "name": "expand",
                "required": False,
                "in": "query",
                "description": "Specify required field by comma",
                "schema": {
                    "title": "Expanded fields",
                    "type": "string",
                },
                "example": "nested1,nested2",
            },
        ]
        return parameters
| {
"repo_name": "rsinger86/drf-flex-fields",
"path": "rest_flex_fields/filter_backends.py",
"copies": "1",
"size": "5946",
"license": "mit",
"hash": -4576592348556671500,
"line_mean": 32.593220339,
"line_max": 83,
"alpha_frac": 0.5082408342,
"autogenerated": false,
"ratio": 4.967418546365915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5975659380565915,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import Optional
from indy_crypto.bls import ProofOfPossession, VerKey
from common.serializers.serialization import pool_state_serializer
from crypto.bls.indy_crypto.bls_crypto_indy_crypto import IndyCryptoBlsUtils
from plenum.common.constants import POOL_LEDGER_ID, NODE, DATA, BLS_KEY, \
BLS_KEY_PROOF, TARGET_NYM, DOMAIN_LEDGER_ID, NODE_IP, \
NODE_PORT, CLIENT_IP, CLIENT_PORT, ALIAS, VERKEY
from plenum.common.exceptions import InvalidClientRequest, UnauthorizedClientRequest
from plenum.common.request import Request
from plenum.common.txn_util import get_payload_data, get_from
from plenum.common.types import f
from plenum.server.database_manager import DatabaseManager
from plenum.server.request_handlers.handler_interfaces.write_request_handler import WriteRequestHandler
from plenum.server.request_handlers.utils import is_steward
from stp_core.crypto.util import base58_is_correct_ed25519_key
class NodeHandler(WriteRequestHandler):
    """Write-request handler for NODE transactions on the pool ledger.

    Validates and applies transactions that add a new node or update an
    existing node's data (addresses, alias, BLS keys).
    """

    state_serializer = pool_state_serializer

    def __init__(self, database_manager: DatabaseManager, bls_crypto_verifier):
        # bls_crypto_verifier may be None; BLS proof-of-possession checks are
        # then skipped (see _verify_bls_key_proof_of_possession).
        super().__init__(database_manager, NODE, POOL_LEDGER_ID)
        self.bls_crypto_verifier = bls_crypto_verifier

    def static_validation(self, request: Request):
        """Stateless checks: dest/verkey must be valid Ed25519 keys and a BLS
        key, if present, must come with a verifying proof of possession.

        Raises InvalidClientRequest on any violation.
        """
        self._validate_request_type(request)
        dest = request.operation.get(TARGET_NYM)
        if not base58_is_correct_ed25519_key(dest):
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "Node's dest is not correct Ed25519 key. Dest: {}".format(dest))
        verkey = request.operation.get(VERKEY, None)
        if verkey:
            if not base58_is_correct_ed25519_key(verkey):
                raise InvalidClientRequest(request.identifier, request.reqId,
                                           "Node's verkey is not correct Ed25519 key. Verkey: {}".format(verkey))
        blskey = request.operation.get(DATA).get(BLS_KEY, None)
        blskey_proof = request.operation.get(DATA).get(BLS_KEY_PROOF, None)
        # BLS key and its proof of possession must be supplied together (or
        # neither), and the proof must verify against the key.
        if blskey is None and blskey_proof is None:
            return
        if blskey is None and blskey_proof is not None:
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "A Proof of possession is not "
                                       "needed without BLS key")
        if blskey is not None and blskey_proof is None:
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "A Proof of possession must be "
                                       "provided with BLS key")
        if not self._verify_bls_key_proof_of_possession(blskey_proof,
                                                        blskey):
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "Proof of possession {} is incorrect "
                                       "for BLS key {}".
                                       format(blskey_proof, blskey))

    def gen_state_key(self, txn):
        # Pool state is keyed by the node's nym, encoded to bytes.
        node_nym = get_payload_data(txn).get(TARGET_NYM)
        return node_nym.encode()

    def dynamic_validation(self, request: Request, req_pp_time: Optional[int]):
        """State-dependent authorization: adding vs. updating a node follow
        different rules. Raises UnauthorizedClientRequest on failure."""
        self._validate_request_type(request)
        node_nym = request.operation.get(TARGET_NYM)
        # Presence of the nym in (uncommitted) state distinguishes an update
        # from a fresh addition.
        if self.get_from_state(node_nym, is_committed=False):
            error = self._auth_error_while_updating_node(request)
        else:
            error = self._auth_error_while_adding_node(request)
        if error:
            raise UnauthorizedClientRequest(request.identifier, request.reqId,
                                            error)

    def update_state(self, txn, prev_result, request, is_committed=False):
        """Merge the txn's DATA into the node's state entry, creating it (and
        recording the txn author as the node's steward) when absent."""
        self._validate_txn_type(txn)
        node_nym = get_payload_data(txn).get(TARGET_NYM)
        data = get_payload_data(txn).get(DATA, {})
        existing_data = self.get_from_state(node_nym, is_committed=is_committed)
        # Node data did not exist in state, so this is a new node txn,
        # hence store the author of the txn (steward of node)
        if not existing_data:
            existing_data[f.IDENTIFIER.nm] = get_from(txn)
        existing_data.update(data)
        key = self.gen_state_key(txn)
        val = self.state_serializer.serialize(existing_data)
        self.state.set(key, val)

    def _auth_error_while_adding_node(self, request):
        """Return an error string if the new-node request is invalid, else None."""
        origin = request.identifier
        operation = request.operation
        data = operation.get(DATA, {})
        error = self._data_error_while_validating(data, skip_keys=False)
        if error:
            return error
        # A steward may run at most one node.
        if self._steward_has_node(origin):
            return "{} already has a node".format(origin)
        error = self._is_node_data_conflicting(data)
        if error:
            return "existing data has conflicts with " \
                   "request data {}. Error: {}".format(operation.get(DATA), error)

    def _auth_error_while_updating_node(self, request):
        # Check if steward of the node is updating it and its data does not
        # conflict with any existing node's data
        operation = request.operation
        node_nym = operation.get(TARGET_NYM)
        data = operation.get(DATA, {})
        return self._data_error_while_validating_update(data, node_nym)

    def _decode_state_value(self, encoded):
        # Deserialize a raw state value; absent/empty values become {}.
        if encoded:
            return self.state_serializer.deserialize(encoded)
        return {}

    def get_all_node_data_for_root_hash(self, root_hash):
        """Return the deserialized data of every node at the given state root."""
        leaves = self.state.get_all_leaves_for_root_hash(root_hash)
        raw_node_data = leaves.values()
        nodes = list(map(lambda x: self.state_serializer.deserialize(
            self.state.get_decoded(x)), raw_node_data))
        return nodes

    def _is_steward(self, nym, is_committed: bool = True):
        # Steward-ship is recorded in the domain ledger state, not pool state.
        domain_state = self.database_manager.get_database(DOMAIN_LEDGER_ID).state
        return is_steward(domain_state, nym, is_committed)

    @lru_cache(maxsize=64)
    def _is_steward_of_node(self, steward_nym, node_nym, is_committed=True):
        # NOTE(review): lru_cache on an instance method keys on `self` (keeping
        # the handler alive) and caches a state lookup whose answer can change
        # over time — confirm this staleness is acceptable here.
        node_data = self.get_from_state(node_nym, is_committed=is_committed)
        return node_data and node_data[f.IDENTIFIER.nm] == steward_nym

    def _steward_has_node(self, steward_nym) -> bool:
        # Cannot use lru_cache since a steward might have a node in future and
        # unfortunately lru_cache does not allow single entries to be cleared
        # TODO: Modify lru_cache to clear certain entities
        for nodeNym, nodeData in self.state.as_dict.items():
            nodeData = self.state_serializer.deserialize(nodeData)
            if nodeData.get(f.IDENTIFIER.nm) == steward_nym:
                return True
        return False

    @staticmethod
    def _data_error_while_validating(data, skip_keys):
        """Return an error string for malformed node DATA, else None.

        With skip_keys=False all of node/client ip+port and ALIAS must be
        present; in any case node and client HA must differ.
        """
        req_keys = {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, ALIAS}
        if not skip_keys and not req_keys.issubset(set(data.keys())):
            return 'Missing some of {}'.format(req_keys)
        # Distinct string defaults make two *absent* addresses compare unequal,
        # so the HA-equality check below only fires on real duplicates.
        nip = data.get(NODE_IP, 'nip')
        np = data.get(NODE_PORT, 'np')
        cip = data.get(CLIENT_IP, 'cip')
        cp = data.get(CLIENT_PORT, 'cp')
        if (nip, np) == (cip, cp):
            return 'node and client ha cannot be same'

    def _is_node_data_same(self, node_nym, new_data, is_committed=True):
        # Compare stored data (minus the steward identifier) with the request's.
        node_info = self.get_from_state(node_nym, is_committed=is_committed)
        node_info.pop(f.IDENTIFIER.nm, None)
        return node_info == new_data

    def _is_node_data_conflicting(self, new_data, updating_nym=None):
        # Check if node's ALIAS or IPs or ports conflicts with other nodes,
        # also, the node is not allowed to change its alias.
        # Check ALIAS change
        if updating_nym:
            old_alias = self.get_from_state(updating_nym, is_committed=False).get(ALIAS)
            new_alias = new_data.get(ALIAS)
            if old_alias != new_alias:
                return "Node's alias cannot be changed"
        nodes = self.state.as_dict.items()
        for other_node_nym, other_node_data in nodes:
            other_node_nym = other_node_nym.decode()
            other_node_data = self.state_serializer.deserialize(other_node_data)
            # Skip the node being updated itself; every other node must differ
            # in alias, nodestack HA and clientstack HA.
            if not updating_nym or other_node_nym != updating_nym:
                # The node's ip, port and alias should be unique
                same_alias = new_data.get(ALIAS) == other_node_data.get(ALIAS)
                if same_alias:
                    return "Node's alias must be unique"
                same_node_ha = (new_data.get(NODE_IP), new_data.get(NODE_PORT)) == \
                               (other_node_data.get(NODE_IP), other_node_data.get(NODE_PORT))
                if same_node_ha:
                    return "Node's nodestack addresses must be unique"
                same_cli_ha = (new_data.get(CLIENT_IP), new_data.get(CLIENT_PORT)) == \
                              (other_node_data.get(CLIENT_IP), other_node_data.get(CLIENT_PORT))
                if same_cli_ha:
                    return "Node's clientstack addresses must be unique"

    def _data_error_while_validating_update(self, data, node_nym):
        """Return an error string for an invalid node update, else None."""
        error = self._data_error_while_validating(data, skip_keys=True)
        if error:
            return error
        if self._is_node_data_same(node_nym, data, is_committed=False):
            return "node already has the same data as requested"
        error = self._is_node_data_conflicting(data, node_nym)
        if error:
            return "existing data has conflicts with " \
                   "request data {}. Error: {}".format(data, error)

    def _verify_bls_key_proof_of_possession(self, key_proof, pk):
        """Verify the BLS proof of possession; trivially True when no verifier
        is configured."""
        if self.bls_crypto_verifier is None:
            return True
        key_proof_bls = IndyCryptoBlsUtils.bls_from_str(key_proof, cls=ProofOfPossession)
        pk_bls = IndyCryptoBlsUtils.bls_from_str(pk, cls=VerKey)
        return self.bls_crypto_verifier.verify_key_proof_of_possession(key_proof_bls,
                                                                       pk_bls)
| {
"repo_name": "evernym/zeno",
"path": "plenum/server/request_handlers/node_handler.py",
"copies": "1",
"size": "10182",
"license": "apache-2.0",
"hash": -5821467442304997000,
"line_mean": 47.2559241706,
"line_max": 113,
"alpha_frac": 0.6166764879,
"autogenerated": false,
"ratio": 3.6877942774357115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4804470765335711,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from typing import Optional
from ursa.bls import ProofOfPossession, VerKey
from common.serializers.serialization import pool_state_serializer
from crypto.bls.indy_crypto.bls_crypto_indy_crypto import IndyCryptoBlsUtils
from plenum.common.constants import POOL_LEDGER_ID, NODE, DATA, BLS_KEY, \
BLS_KEY_PROOF, TARGET_NYM, DOMAIN_LEDGER_ID, NODE_IP, \
NODE_PORT, CLIENT_IP, CLIENT_PORT, ALIAS, VERKEY
from plenum.common.exceptions import InvalidClientRequest, UnauthorizedClientRequest
from plenum.common.request import Request
from plenum.common.txn_util import get_payload_data, get_from
from plenum.common.types import f
from plenum.server.database_manager import DatabaseManager
from plenum.server.request_handlers.handler_interfaces.write_request_handler import WriteRequestHandler
from plenum.server.request_handlers.utils import is_steward
from stp_core.crypto.util import base58_is_correct_ed25519_key
class NodeHandler(WriteRequestHandler):
    """Write-request handler for NODE transactions on the pool ledger.

    Validates and applies transactions that add a new node or update an
    existing node's data (addresses, alias, BLS keys).
    """

    state_serializer = pool_state_serializer

    def __init__(self, database_manager: DatabaseManager, bls_crypto_verifier):
        # bls_crypto_verifier may be None; BLS proof-of-possession checks are
        # then skipped (see _verify_bls_key_proof_of_possession).
        super().__init__(database_manager, NODE, POOL_LEDGER_ID)
        self.bls_crypto_verifier = bls_crypto_verifier

    def static_validation(self, request: Request):
        """Stateless checks: dest/verkey must be valid Ed25519 keys and a BLS
        key, if present, must come with a verifying proof of possession.

        Raises InvalidClientRequest on any violation.
        """
        self._validate_request_type(request)
        dest = request.operation.get(TARGET_NYM)
        if not base58_is_correct_ed25519_key(dest):
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "Node's dest is not correct Ed25519 key. Dest: {}".format(dest))
        verkey = request.operation.get(VERKEY, None)
        if verkey:
            if not base58_is_correct_ed25519_key(verkey):
                raise InvalidClientRequest(request.identifier, request.reqId,
                                           "Node's verkey is not correct Ed25519 key. Verkey: {}".format(verkey))
        blskey = request.operation.get(DATA).get(BLS_KEY, None)
        blskey_proof = request.operation.get(DATA).get(BLS_KEY_PROOF, None)
        # BLS key and its proof of possession must be supplied together (or
        # neither), and the proof must verify against the key.
        if blskey is None and blskey_proof is None:
            return
        if blskey is None and blskey_proof is not None:
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "A Proof of possession is not "
                                       "needed without BLS key")
        if blskey is not None and blskey_proof is None:
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "A Proof of possession must be "
                                       "provided with BLS key")
        if not self._verify_bls_key_proof_of_possession(blskey_proof,
                                                        blskey):
            raise InvalidClientRequest(request.identifier, request.reqId,
                                       "Proof of possession {} is incorrect "
                                       "for BLS key {}".
                                       format(blskey_proof, blskey))

    def gen_state_key(self, txn):
        # Pool state is keyed by the node's nym, encoded to bytes.
        node_nym = get_payload_data(txn).get(TARGET_NYM)
        return node_nym.encode()

    def dynamic_validation(self, request: Request, req_pp_time: Optional[int]):
        """State-dependent authorization: adding vs. updating a node follow
        different rules. Raises UnauthorizedClientRequest on failure."""
        self._validate_request_type(request)
        node_nym = request.operation.get(TARGET_NYM)
        # Presence of the nym in (uncommitted) state distinguishes an update
        # from a fresh addition.
        if self.get_from_state(node_nym, is_committed=False):
            error = self._auth_error_while_updating_node(request)
        else:
            error = self._auth_error_while_adding_node(request)
        if error:
            raise UnauthorizedClientRequest(request.identifier, request.reqId,
                                            error)

    def update_state(self, txn, prev_result, request, is_committed=False):
        """Merge the txn's DATA into the node's state entry, creating it (and
        recording the txn author as the node's steward) when absent."""
        self._validate_txn_type(txn)
        node_nym = get_payload_data(txn).get(TARGET_NYM)
        data = get_payload_data(txn).get(DATA, {})
        existing_data = self.get_from_state(node_nym, is_committed=is_committed)
        # Node data did not exist in state, so this is a new node txn,
        # hence store the author of the txn (steward of node)
        if not existing_data:
            existing_data[f.IDENTIFIER.nm] = get_from(txn)
        existing_data.update(data)
        key = self.gen_state_key(txn)
        val = self.state_serializer.serialize(existing_data)
        self.state.set(key, val)

    def _auth_error_while_adding_node(self, request):
        """Return an error string if the new-node request is invalid, else None."""
        origin = request.identifier
        operation = request.operation
        data = operation.get(DATA, {})
        error = self._data_error_while_validating(data, skip_keys=False)
        if error:
            return error
        # A steward may run at most one node.
        if self._steward_has_node(origin):
            return "{} already has a node".format(origin)
        error = self._is_node_data_conflicting(data)
        if error:
            return "existing data has conflicts with " \
                   "request data {}. Error: {}".format(operation.get(DATA), error)

    def _auth_error_while_updating_node(self, request):
        # Check if steward of the node is updating it and its data does not
        # conflict with any existing node's data
        operation = request.operation
        node_nym = operation.get(TARGET_NYM)
        data = operation.get(DATA, {})
        return self._data_error_while_validating_update(data, node_nym)

    def _decode_state_value(self, encoded):
        # Deserialize a raw state value; absent/empty values become {}.
        if encoded:
            return self.state_serializer.deserialize(encoded)
        return {}

    def get_all_node_data_for_root_hash(self, root_hash):
        """Return the deserialized data of every node at the given state root."""
        leaves = self.state.get_all_leaves_for_root_hash(root_hash)
        raw_node_data = leaves.values()
        nodes = list(map(lambda x: self.state_serializer.deserialize(
            self.state.get_decoded(x)), raw_node_data))
        return nodes

    def _is_steward(self, nym, is_committed: bool = True):
        # Steward-ship is recorded in the domain ledger state, not pool state.
        domain_state = self.database_manager.get_database(DOMAIN_LEDGER_ID).state
        return is_steward(domain_state, nym, is_committed)

    @lru_cache(maxsize=64)
    def _is_steward_of_node(self, steward_nym, node_nym, is_committed=True):
        # NOTE(review): lru_cache on an instance method keys on `self` (keeping
        # the handler alive) and caches a state lookup whose answer can change
        # over time — confirm this staleness is acceptable here.
        node_data = self.get_from_state(node_nym, is_committed=is_committed)
        return node_data and node_data[f.IDENTIFIER.nm] == steward_nym

    def _steward_has_node(self, steward_nym) -> bool:
        # Cannot use lru_cache since a steward might have a node in future and
        # unfortunately lru_cache does not allow single entries to be cleared
        # TODO: Modify lru_cache to clear certain entities
        for nodeNym, nodeData in self.state.as_dict.items():
            nodeData = self.state_serializer.deserialize(nodeData)
            if nodeData.get(f.IDENTIFIER.nm) == steward_nym:
                return True
        return False

    @staticmethod
    def _data_error_while_validating(data, skip_keys):
        """Return an error string for malformed node DATA, else None.

        With skip_keys=False all of node/client ip+port and ALIAS must be
        present; in any case node and client HA must differ.
        """
        req_keys = {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, ALIAS}
        if not skip_keys and not req_keys.issubset(set(data.keys())):
            return 'Missing some of {}'.format(req_keys)
        # Distinct string defaults make two *absent* addresses compare unequal,
        # so the HA-equality check below only fires on real duplicates.
        nip = data.get(NODE_IP, 'nip')
        np = data.get(NODE_PORT, 'np')
        cip = data.get(CLIENT_IP, 'cip')
        cp = data.get(CLIENT_PORT, 'cp')
        if (nip, np) == (cip, cp):
            return 'node and client ha cannot be same'

    def _is_node_data_same(self, node_nym, new_data, is_committed=True):
        # Compare stored data (minus the steward identifier) with the request's.
        node_info = self.get_from_state(node_nym, is_committed=is_committed)
        node_info.pop(f.IDENTIFIER.nm, None)
        return node_info == new_data

    def _is_node_data_conflicting(self, new_data, updating_nym=None):
        # Check if node's ALIAS or IPs or ports conflicts with other nodes,
        # also, the node is not allowed to change its alias.
        # Check ALIAS change
        if updating_nym:
            old_alias = self.get_from_state(updating_nym, is_committed=False).get(ALIAS)
            new_alias = new_data.get(ALIAS)
            if old_alias != new_alias:
                return "Node's alias cannot be changed"
        nodes = self.state.as_dict.items()
        for other_node_nym, other_node_data in nodes:
            other_node_nym = other_node_nym.decode()
            other_node_data = self.state_serializer.deserialize(other_node_data)
            # Skip the node being updated itself; every other node must differ
            # in alias, nodestack HA and clientstack HA.
            if not updating_nym or other_node_nym != updating_nym:
                # The node's ip, port and alias should be unique
                same_alias = new_data.get(ALIAS) == other_node_data.get(ALIAS)
                if same_alias:
                    return "Node's alias must be unique"
                same_node_ha = (new_data.get(NODE_IP), new_data.get(NODE_PORT)) == \
                               (other_node_data.get(NODE_IP), other_node_data.get(NODE_PORT))
                if same_node_ha:
                    return "Node's nodestack addresses must be unique"
                same_cli_ha = (new_data.get(CLIENT_IP), new_data.get(CLIENT_PORT)) == \
                              (other_node_data.get(CLIENT_IP), other_node_data.get(CLIENT_PORT))
                if same_cli_ha:
                    return "Node's clientstack addresses must be unique"

    def _data_error_while_validating_update(self, data, node_nym):
        """Return an error string for an invalid node update, else None."""
        error = self._data_error_while_validating(data, skip_keys=True)
        if error:
            return error
        if self._is_node_data_same(node_nym, data, is_committed=False):
            return "node already has the same data as requested"
        error = self._is_node_data_conflicting(data, node_nym)
        if error:
            return "existing data has conflicts with " \
                   "request data {}. Error: {}".format(data, error)

    def _verify_bls_key_proof_of_possession(self, key_proof, pk):
        """Verify the BLS proof of possession; trivially True when no verifier
        is configured."""
        if self.bls_crypto_verifier is None:
            return True
        key_proof_bls = IndyCryptoBlsUtils.bls_from_str(key_proof, cls=ProofOfPossession)
        pk_bls = IndyCryptoBlsUtils.bls_from_str(pk, cls=VerKey)
        return self.bls_crypto_verifier.verify_key_proof_of_possession(key_proof_bls,
                                                                       pk_bls)
| {
"repo_name": "evernym/plenum",
"path": "plenum/server/request_handlers/node_handler.py",
"copies": "1",
"size": "10175",
"license": "apache-2.0",
"hash": 2232331412928904400,
"line_mean": 47.2227488152,
"line_max": 113,
"alpha_frac": 0.6165110565,
"autogenerated": false,
"ratio": 3.6879304095686845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9800489973905235,
"avg_score": 0.0007902984326898534,
"num_lines": 211
} |
from functools import lru_cache
from typing import Sequence, Dict, Union, Tuple, List, Optional, cast, Iterable
import numpy as np
import quimb
import quimb.tensor as qtn
import cirq
@lru_cache()
def _qpos_tag(qubits: Union[cirq.LineQubit, Tuple[cirq.LineQubit]]):
    """Return the drawing "position tag" for a qubit or group of qubits.

    A single qubit is normalized to a one-element tuple; for a group, the
    tag names the leftmost (lowest-index) qubit.
    """
    if isinstance(qubits, cirq.LineQubit):
        return _qpos_tag((qubits,))
    leftmost = min(qubit.x for qubit in qubits)
    return f'q{leftmost}'
@lru_cache()
def _qpos_y(qubits: Union[cirq.LineQubit, Tuple[cirq.LineQubit]], tot_n_qubits: int):
    """Return the drawing y-coordinate for a qubit or group of qubits.

    A group is placed at the mean of its qubit indices, and the axis is
    flipped so qubit 0 sits at the top (maximal y).

    Args:
        qubits: The qubits involved in the tensor.
        tot_n_qubits: Total number of qubits in the circuit, used for the flip.
    """
    if isinstance(qubits, cirq.LineQubit):
        return _qpos_y((qubits,), tot_n_qubits)
    mean_x = np.mean([qubit.x for qubit in qubits]).item()
    return tot_n_qubits - mean_x - 1
def _add_to_positions(
    positions: Dict[Tuple[str, str], Tuple[float, float]],
    mi: int,
    qubits: Union[cirq.LineQubit, Tuple[cirq.LineQubit]],
    *,
    tot_n_qubits: int,
    x_scale,
    y_scale,
    x_nudge,
    yb_offset,
):
    """Record drawing coordinates for one tensor's forward and backward copies.

    Args:
        positions: The dictionary to update; quimb consumes it for drawing.
        mi: Moment index, giving the x position.
        qubits: The qubits involved, giving the y position.
        tot_n_qubits: Total qubit count so qubit 0 can be drawn at the top.
        x_scale: Horizontal stretch factor.
        y_scale: Vertical stretch factor.
        x_nudge: Small per-row x shift so the vertical Kraus lines between
            the forward and backward circuits stay distinguishable.
        yb_offset: Vertical offset applied to the "backwards" circuit copy.
    """
    row = _qpos_y(qubits, tot_n_qubits)
    tag = _qpos_tag(qubits)
    positions[(f'i{mi}f', tag)] = (mi * x_scale + row * x_nudge, y_scale * row)
    positions[(f'i{mi}b', tag)] = (mi * x_scale, y_scale * row + yb_offset)
def circuit_to_density_matrix_tensors(
    circuit: cirq.Circuit, qubits: Optional[Sequence[cirq.LineQubit]] = None
) -> Tuple[List[qtn.Tensor], Dict['cirq.Qid', int], Dict[Tuple[str, str], Tuple[float, float]]]:
    """Given a circuit with mixtures or channels, construct a tensor network
    representation of the density matrix.

    This assumes you start in the |0..0><0..0| state. Indices are named
    "nf{i}_q{x}" and "nb{i}_q{x}" where i is a time index and x is a
    qubit index. nf- and nb- refer to the "forwards" and "backwards"
    copies of the circuit. Kraus indices are named "k{j}" where j is an
    independent "kraus" internal index which you probably never need to access.

    Args:
        circuit: The circuit containing operations that support the
            cirq.unitary() or cirq.kraus() protocols.
        qubits: The qubits in the circuit.

    Returns:
        tensors: A list of Quimb Tensor objects
        qubit_frontier: A mapping from qubit to time index at the end of
            the circuit. This can be used to deduce the names of the free
            tensor indices.
        positions: A positions dictionary suitable for passing to tn.graph()'s
            `fix` argument to draw the resulting tensor network similar to a
            quantum circuit.
    """
    if qubits is None:
        # coverage: ignore
        qubits = sorted(cast(Iterable[cirq.LineQubit], circuit.all_qubits()))
    # qubit_frontier[q] is the next free time index for qubit q; it advances
    # once per operation acting on q.
    qubit_frontier: Dict[cirq.Qid, int] = {q: 0 for q in qubits}
    kraus_frontier = 0
    positions: Dict[Tuple[str, str], Tuple[float, float]] = {}
    tensors: List[qtn.Tensor] = []
    # Drawing-layout constants; see _add_to_positions for their meaning.
    x_scale = 2
    y_scale = 3
    x_nudge = 0.3
    n_qubits = len(qubits)
    yb_offset = (n_qubits + 0.5) * y_scale

    def _positions(mi, qubits):
        # Shorthand that fills `positions` using the layout constants above.
        return _add_to_positions(
            positions,
            mi,
            qubits,
            tot_n_qubits=n_qubits,
            x_scale=x_scale,
            y_scale=y_scale,
            x_nudge=x_nudge,
            yb_offset=yb_offset,
        )

    # Initialize forwards and backwards qubits into the 0 state, i.e. prepare
    # rho_0 = |0><0|.
    for q in qubits:
        tensors += [
            qtn.Tensor(
                data=quimb.up().squeeze(), inds=(f'nf0_q{q.x}',), tags={'Q0', 'i0f', _qpos_tag(q)}
            ),
            qtn.Tensor(
                data=quimb.up().squeeze(), inds=(f'nb0_q{q.x}',), tags={'Q0', 'i0b', _qpos_tag(q)}
            ),
        ]
        _positions(0, q)
    for mi, moment in enumerate(circuit.moments):
        for op in moment.operations:
            start_inds_f = [f'nf{qubit_frontier[q]}_q{q.x}' for q in op.qubits]
            start_inds_b = [f'nb{qubit_frontier[q]}_q{q.x}' for q in op.qubits]
            for q in op.qubits:
                qubit_frontier[q] += 1
            end_inds_f = [f'nf{qubit_frontier[q]}_q{q.x}' for q in op.qubits]
            end_inds_b = [f'nb{qubit_frontier[q]}_q{q.x}' for q in op.qubits]
            if cirq.has_unitary(op):
                # Unitary: U acts on the forward copy, conj(U) on the backward.
                U = cirq.unitary(op).reshape((2,) * 2 * len(op.qubits)).astype(np.complex128)
                tensors.append(
                    qtn.Tensor(
                        data=U,
                        inds=end_inds_f + start_inds_f,
                        tags={f'Q{len(op.qubits)}', f'i{mi + 1}f', _qpos_tag(op.qubits)},
                    )
                )
                tensors.append(
                    qtn.Tensor(
                        data=np.conj(U),
                        inds=end_inds_b + start_inds_b,
                        tags={f'Q{len(op.qubits)}', f'i{mi + 1}b', _qpos_tag(op.qubits)},
                    )
                )
            elif cirq.has_kraus(op):
                # Channel: a shared fresh Kraus index links the forward and
                # backward tensors, implementing sum_j K_j rho K_j^dagger.
                K = np.asarray(cirq.kraus(op), dtype=np.complex128)
                kraus_inds = [f'k{kraus_frontier}']
                tensors.append(
                    qtn.Tensor(
                        data=K,
                        inds=kraus_inds + end_inds_f + start_inds_f,
                        tags={f'kQ{len(op.qubits)}', f'i{mi + 1}f', _qpos_tag(op.qubits)},
                    )
                )
                tensors.append(
                    qtn.Tensor(
                        data=np.conj(K),
                        inds=kraus_inds + end_inds_b + start_inds_b,
                        tags={f'kQ{len(op.qubits)}', f'i{mi + 1}b', _qpos_tag(op.qubits)},
                    )
                )
                kraus_frontier += 1
            else:
                raise ValueError(repr(op))  # coverage: ignore
            _positions(mi + 1, op.qubits)
    return tensors, qubit_frontier, positions
def tensor_density_matrix(
    circuit: cirq.Circuit, qubits: Optional[List[cirq.LineQubit]] = None
) -> np.ndarray:
    """Contract a tensor network for the density matrix produced by *circuit*.

    For circuits on six qubits or fewer the network is contracted
    moment-by-moment (the "in time" order); for larger circuits the
    contraction order is left to quimb, since finding a good order would
    otherwise dominate the runtime. Benchmark your own workload if
    performance matters.
    """
    if qubits is None:
        qubits = sorted(cast(Iterable[cirq.LineQubit], circuit.all_qubits()))

    tensors, frontier, _ = circuit_to_density_matrix_tensors(circuit=circuit, qubits=qubits)
    network = qtn.TensorNetwork(tensors)
    forward_inds = tuple(f'nf{frontier[q]}_q{q.x}' for q in qubits)
    backward_inds = tuple(f'nb{frontier[q]}_q{q.x}' for q in qubits)
    if len(qubits) > 6:
        network.contract(inplace=True)
    else:
        # Heuristic: small circuits are simply contracted in time order.
        schedule = [(f'i{t}b', f'i{t}f') for t in range(len(circuit) + 1)]
        network.contract_cumulative(schedule, inplace=True)
    return network.to_dense(forward_inds, backward_inds)
| {
"repo_name": "quantumlib/Cirq",
"path": "cirq-core/cirq/contrib/quimb/density_matrix.py",
"copies": "1",
"size": "8487",
"license": "apache-2.0",
"hash": -2706866796492286000,
"line_mean": 38.4744186047,
"line_max": 98,
"alpha_frac": 0.5805349358,
"autogenerated": false,
"ratio": 3.3139398672393594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43944748030393593,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from wrapt import synchronized
from time import time
from collections import namedtuple
from util import iprint
# A musical time signature, e.g. TimeSignature(4, 4) for common time.
TimeSignature = namedtuple("TimeSignature", ["top", "bottom"])
# Immutable snapshot of the clock state handed out by Clock.get_report();
# field meanings are listed in the string below.
TimeReport = namedtuple("TimeReport", ["delta", "measure", "signature", "tick", "bpm", "prog", "playing"])
"""
- delta: Time passed since last report, in seconds
- measure: Current measure of the song, starting with 0
- signature: TimeSignature namedtuple
- tick: Tick within the current measure
- bpm: Currently set BPM
- prog: Moves within [0.0, 1.0) c R over the span of one measure
- playing: Whether the song is playing or not
"""
@lru_cache(8)
def ticks_per_measure(signature):
    """Number of MIDI clock ticks (24 per quarter note) in one measure."""
    quarter_notes_per_beat = signature.bottom / 4
    return signature.top * 24 / quarter_notes_per_beat
@lru_cache()
def seconds_per_tick(bpm):
    """Wall-clock duration of one MIDI clock tick (24 ticks per beat)."""
    seconds_per_beat = 60.0 / bpm
    return seconds_per_beat / 24
@synchronized
class Clock(object):
    """MIDI-style clock (24 ticks per quarter note) that tracks measure
    progress and produces TimeReport snapshots.

    The wrapt @synchronized decorator serializes access across threads.
    """

    def __init__(self, parent):
        self.parent = parent
        self._bpm = 120
        self.signature = TimeSignature(4, 4)
        self.tick_count = 0           # ticks elapsed within the current measure
        self.measure_count = 0        # measures elapsed since start()
        self.last_report_time = None  # wall time of the previous get_report()
        self.last_tick_time = None    # wall time of the previous tick()
        self.prog = 0.0               # position within the measure, in [0.0, 1.0)
        self.running = False
        self.absolute_time = 0.0      # seconds accumulated across all reports
        self.last_prog = 0.0

    @synchronized
    def get_report(self):
        """Build a TimeReport for the current instant.

        While running, prog interpolates between ticks using wall-clock time;
        while stopped, prog keeps advancing freely at the current BPM.
        """
        # DELTA
        now = time()
        try:
            delta = now - self.last_report_time
        except TypeError:  # self.last_report_time not set yet
            delta = 0.0
        self.absolute_time += delta
        self.last_report_time = now
        # PROG
        if self.running:
            try:
                self.prog = self.tick_count / ticks_per_measure(self.signature)
                delta_tick = now - self.last_tick_time
                iprint(delta_tick < 0, "OOPS")
                self.prog += delta_tick / seconds_per_tick(self.bpm) / ticks_per_measure(self.signature)
            except TypeError:  # self.last_report_time not set yet
                self.prog = 0.0
        else:
            free_prog = delta / seconds_per_tick(self.bpm) / ticks_per_measure(self.signature)
            self.prog = self.prog + free_prog
        self.prog %= 1
        # @BUG: prog not monotone. Happens when the clock stops and then resumes running (sometimes)
        if (self.prog <= self.last_prog):
            print(self.prog, self.last_prog, now, self.last_tick_time)
        self.last_prog = self.prog
        return TimeReport(delta, self.measure_count, self.signature, self.tick_count, self._bpm, self.prog, self.running)

    @property
    def bpm(self):
        # Beats per minute; exposed as a property so the setter can be locked.
        return self._bpm

    @synchronized
    @bpm.setter
    def bpm(self, value):
        # NOTE(review): @synchronized here wraps the property object returned
        # by @bpm.setter, not the raw setter function — confirm wrapt handles
        # this decorator order as intended.
        self._bpm = value

    @synchronized
    def start(self):
        # Restart playback from the beginning of measure 0.
        self.measure_count = 0
        self.tick_count = 0
        self.running = True

    @synchronized
    def stop(self):
        self.running = False

    @synchronized
    def unpause(self):
        # Resume without resetting the measure/tick counters.
        self.running = True

    @synchronized
    def tick(self):
        """Advance by one MIDI clock tick (expected 24 per quarter note)."""
        if self.tick_count == 0:
            self._at_measure_start()
        # NOTE(review): this float modulo duplicates ticks_per_measure() and
        # leaves tick_count a float — confirm that is intentional.
        self.tick_count = (self.tick_count + 1) % (self.signature.top * 24 / (self.signature.bottom / 4))
        self.last_tick_time = time()

    def _at_measure_start(self):
        # Called on the first tick of every measure.
        print("Measure", self.measure_count, self.signature, self.bpm)
        self.measure_count += 1
| {
"repo_name": "echolox/smartbcr2k",
"path": "smci/clock.py",
"copies": "1",
"size": "3365",
"license": "mit",
"hash": -2677691082079889000,
"line_mean": 26.5819672131,
"line_max": 121,
"alpha_frac": 0.6005943536,
"autogenerated": false,
"ratio": 3.6299892125134843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9679773648066239,
"avg_score": 0.010161983609449022,
"num_lines": 122
} |
from functools import lru_cache
import json
import re
import typing
import warnings
from django import template
from django.conf import settings
from django.utils.encoding import force_str
from django.utils.html import format_html
from django.utils.safestring import mark_safe
import pubcode
from ..models import UIText, UserAdapter, Event, RemoteEvent
from ..text_engine import mark_down
register = template.Library()
def _get_ui_text_query(context, id_):
    """Build a UIText queryset for the event in *context*.

    Texts are read from the (possibly remote) source event's own database;
    when no source_event is present, the current event is used and a warning
    is emitted.
    """
    event = context["event"]  # type: Event
    source_event = context.get("source_event")  # type: typing.Union[Event, RemoteEvent]
    if source_event is None:
        warnings.warn("Missing source_event value when getting text " + id_)
        source_event = event
    db_alias = source_event.get_real_database_alias()
    return UIText.objects.using(db_alias).filter(event=source_event)
@register.simple_tag(takes_context=True)
def load_text(context, id_):
    """Render the UIText identified by *id_* as markdown-processed HTML.

    Missing texts render as an inline warning in DEBUG and as an empty
    string in production.
    """
    try:
        md = _get_ui_text_query(context, id_).get(identifier=id_).text
        return mark_safe(mark_down(md, context))
    except UIText.DoesNotExist:
        if settings.DEBUG:
            # BUG FIX: pass the identifier as a format_html *argument* so it
            # is HTML-escaped. The old code pre-formatted with str.format(),
            # which bypassed escaping entirely and would also raise on
            # identifiers containing braces.
            return format_html(
                u'<span style="background-color: lightyellow;'
                u' color: black;'
                u' border: 1px solid gray;">'
                u'Missing text {0}.</span>',
                force_str(id_),
            )
        return u""
@register.simple_tag(takes_context=True)
def load_texts(context, id_, wrap=None):
    """
    Output multiple UIText values. If id is not found, only empty string is returned.

    :param id_: Start of id string to find.
    :type id_: str | unicode
    :param wrap: Optional wrapping tag content (such as p). If whitespace, that is used instead.
    :type wrap: str | unicode | None
    :return: str | unicode
    """
    texts = (
        _get_ui_text_query(context, id_)
        .filter(identifier__startswith=id_)
        .order_by("identifier")
        .values_list("text", flat=True)
    )
    if not texts:
        return u""

    begin = u""
    end = u""
    joined = u""
    if wrap is not None:
        trimmed = wrap.strip()
        if len(trimmed) > 0:
            # Opening tag may carry attributes; the closing tag uses only the
            # tag name (first whitespace-separated token).
            begin = format_html(u'<{0}>', trimmed)
            end = format_html(u'</{0}>', trimmed.split(" ")[0])
            # BUG FIX: the separator between consecutive texts must close the
            # previous element and open the next one (e.g. "</p><p>"); the old
            # `begin + end` emitted an empty element ("<p></p>") between texts
            # and produced invalid nesting.
            joined = end + begin
        else:
            # Whitespace-only wrap: use it verbatim as the separator.
            joined = wrap
    return mark_safe(begin + joined.join(mark_down(text, context) for text in texts) + end)
# Limit the size of the dict to a reasonable number so that we don't have
# millions of dataurls cached.
@lru_cache(maxsize=50000)
def get_dataurl(code, ext, expect_width=143):
    """Return a data-URL image for *code* rendered as a Code128 barcode.

    :param code: Value to encode; falsy values yield ''.
    :param ext: Image format passed to pubcode (e.g. "png").
    :param expect_width: Expected pixel width of the rendered barcode, or
        None to skip the width check.
    :raises ValueError: If the rendered width differs from expect_width.
    """
    if not code:
        return ''
    # Code the barcode entirely with charset B to make sure that the barcode is
    # always the same width.
    barcode = pubcode.Code128(code, charset='B')
    data_url = barcode.data_url(image_format=ext, add_quiet_zone=True)
    # These measurements have to be exactly the same as the ones used in
    # price_tags.css. If they are not the image might be distorted enough
    # to not register on the scanner.
    # BUG FIX: raise explicitly instead of `assert`, which is stripped (and
    # the check silently skipped) when Python runs with -O.
    if expect_width is not None and barcode.width(add_quiet_zone=True) != expect_width:
        raise ValueError(
            "Barcode width does not match the expected {}px".format(expect_width)
        )
    return data_url
@register.simple_tag
def barcode_dataurl(code, ext, expect_width=143):
    """Template tag wrapper around the cached get_dataurl()."""
    data_url = get_dataurl(code, ext, expect_width)
    return data_url
@register.simple_tag
def barcode_css(low=4, high=6, target=None, container=None, compress=False):
    """Emit CSS width rules for barcodes of code lengths *low*..*high*.

    Widths are measured from actual Code128 renderings so the on-screen
    element matches the generated image exactly; rules are emitted for 1x
    and 2x scale. Selector templates may be overridden via *target* and
    *container*; *compress* strips all whitespace from the output.
    """
    target = target or ".barcode_img.barcode_img{0}"
    container = container or ".barcode_container.barcode_container{0}"
    css = """
    {target}, {container} {{
        width: {px}px;
        background-color: white;
    }}
    """
    rules = []
    for code_length in range(low, high + 1):
        sample = pubcode.Code128('A' * code_length, charset='B')
        base_px = sample.width(add_quiet_zone=True)
        for multiplier in range(1, 3):
            suffix = "_" + str(code_length) + "_" + str(multiplier)
            rule = css.format(
                target=target.format(suffix),
                container=container.format(suffix),
                px=base_px * multiplier,
            )
            if compress:
                rule = re.sub(r'[\s]+', "", rule)
            rules.append(rule)
    return "".join(rules)
@register.filter
def user_adapter(user, getter):
    """
    Filter for using UserAdapter for user objects.

    :param user: User to filter, class of `settings.USER_MODEL`.
    :param getter: Getter function to apply to the user via adapter.
    :type getter: str
    """
    is_public_name = isinstance(getter, str) and not getter.startswith("_")
    if not is_public_name:
        raise AttributeError("Invalid adapter attribute.")
    accessor = getattr(UserAdapter, getter)
    return accessor(user)
# https://djangosnippets.org/snippets/660/
class SplitListNode(template.Node):
    """Template node that splits a context list into fixed-size chunks."""

    def __init__(self, list_string, chunk_size, new_list_name):
        self.list = list_string
        self.chunk_size = chunk_size
        self.new_list_name = new_list_name

    @staticmethod
    def split_seq(seq, size):
        """Split up seq in pieces of size, from
        http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/425044"""
        return [seq[start:start + size] for start in range(0, len(seq), size)]

    def render(self, context):
        chunked = self.split_seq(context[self.list], int(self.chunk_size))
        context[self.new_list_name] = chunked
        return ''
def split_list(parser, token):
    """<% split_list list as new_list 5 %>"""
    pieces = token.contents.split()
    if len(pieces) != 5:
        raise template.TemplateSyntaxError("split_list list as new_list 5")
    # pieces: [tag_name, source_list, "as", target_name, chunk_size]
    _tag, source_name, _as, target_name, size = pieces
    return SplitListNode(source_name, size, target_name)
split_list = register.tag(split_list)
def as_json(obj):
    """Serialize *obj* to JSON and mark the result safe for template output.
    NOTE(review): the JSON is not HTML-escaped before mark_safe — confirm
    it is only embedded where raw '<' / '&' characters are acceptable.
    """
    return mark_safe(json.dumps(obj))
register.filter("json", as_json)
@register.filter
def format_price(value, format_type="raw"):
    """Wrap *value* with the configured currency prefix/suffix for *format_type*."""
    marks = settings.KIRPPU_CURRENCY[format_type]
    return "{}{}{}".format(marks[0], str(value), marks[1])
| {
"repo_name": "jlaunonen/kirppu",
"path": "kirppu/templatetags/kirppu_tags.py",
"copies": "1",
"size": "6029",
"license": "mit",
"hash": -2683243165230529500,
"line_mean": 30.0773195876,
"line_max": 96,
"alpha_frac": 0.6261403218,
"autogenerated": false,
"ratio": 3.656155245603396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9779133657763175,
"avg_score": 0.0006323819280442378,
"num_lines": 194
} |
from functools import lru_cache
import logging
import lxml.html
from cssselect import HTMLTranslator
class SelectorException(RuntimeError):
    """Raised when a CSS selector cannot be resolved in a document.
    Carries the failing selector and the document for inspection.
    """
    def __init__(self, selector, document):
        # super().__init__ is not called, so exc.args stays empty and
        # str(exc) is ''; inspect .selector / .doc for details.
        self.selector = selector
        self.doc = document
# Shared translator instance used by the memoized xpath() helper below.
translator = HTMLTranslator()

@lru_cache()
def xpath(pattern):
    """Translate a CSS selector into an XPath expression (memoized)."""
    return translator.css_to_xpath(pattern)

logger = logging.getLogger("selector")

# Unique sentinel meaning "no default supplied" in Selector.get().
# The original bound the `object` *type* itself; a dedicated instance
# avoids colliding with a caller that legitimately passes `object`.
_notset = object()
class Selector(object):
    """Thin convenience wrapper around an lxml document/element node."""

    def __init__(self, document):
        self.document = document
        self.translator = HTMLTranslator()

    def find(self, pattern):
        """Return every match of CSS *pattern* as a list of Selectors."""
        matched = self.document.xpath(xpath(pattern))
        if not matched:
            logger.warning("Selector {0} found 0 results".format(pattern))
        return [Selector(node) for node in matched]

    def get(self, pattern, default=_notset):
        """Return the first match of *pattern*; fall back to *default*
        if given, otherwise raise SelectorException."""
        nodes = self.document.xpath(xpath(pattern))
        try:
            first = nodes[0]
        except IndexError as err:
            if default is not _notset:
                return default
            raise SelectorException(pattern, self.document) from err
        return Selector(first)

    def has_class(self, cls):
        """True when *cls* appears in the element's class attribute."""
        classes = self.attr.get("class", "").split(" ")
        return cls in classes

    @property
    def attr(self):
        """Element attributes as a plain dict."""
        return dict(self.document.items())

    @property
    def text(self):
        """Concatenated text content of the element."""
        return self.document.text_content()

    @property
    def parent(self):
        """Parent element wrapped in a Selector."""
        return Selector(self.document.getparent())

    @property
    def pretty(self):
        """Pretty-printed HTML serialization of the element."""
        rendered = lxml.html.etree.tostring(self.document, pretty_print=True)
        return rendered.decode()
| {
"repo_name": "orf/cyborg",
"path": "cyborg/selector.py",
"copies": "1",
"size": "1630",
"license": "apache-2.0",
"hash": 3957104478310924300,
"line_mean": 24.0769230769,
"line_max": 82,
"alpha_frac": 0.6404907975,
"autogenerated": false,
"ratio": 4.201030927835052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018535681186283596,
"num_lines": 65
} |
from functools import lru_cache
import logging
import pandas as pd
from discoutils.thesaurus_loader import Vectors
from eval.pipeline.bov import ThesaurusVectorizer
class MultiVectors(Vectors):
    """Present several repeated-run ``Vectors`` models as a single model.
    Neighbour queries are aggregated across runs: a neighbour is ranked by
    how many runs it appears in, then by mean rank and mean similarity.
    """
    def __init__(self, vectors):
        # Intentionally does not call Vectors.__init__: this object only
        # delegates to the wrapped per-run models.
        self.vectors = vectors
    def init_sims(self, *args, **kwargs):
        """Forward init_sims to every wrapped model."""
        for v in self.vectors:
            v.init_sims(*args, **kwargs)
    def __len__(self):
        # Length of the first run; assumes all runs cover comparable vocab.
        return len(self.vectors[0])
    def __contains__(self, item):
        return any(item in v for v in self.vectors)
    # NOTE: lru_cache on an instance method keys the cache on `self` and
    # keeps the instance alive for the cache's lifetime; acceptable here
    # since a MultiVectors object lives for the whole evaluation.
    @lru_cache(maxsize=2 ** 16)
    def get_nearest_neighbours(self, entry):
        """Return aggregated (neighbour, score) pairs for *entry* across runs,
        or [] when the entry is absent or present in fewer than two runs."""
        if entry not in self:
            return []
        if sum(entry in v for v in self.vectors) < 2:
            # entry contained in too few of the repeated runs, it is probably a spurious word
            # with a low-quality vector. pretend it is not there
            return []
        data = []
        for tid, t in enumerate(self.vectors):
            neighbours = t.get_nearest_neighbours_linear(entry)
            if neighbours:
                for rank, (neigh, sim) in enumerate(neighbours):
                    data.append([tid, rank, neigh, sim])
        if not data:
            return []
        df = pd.DataFrame(data, columns='tid, rank, neigh, sim'.split(', '))
        # Watch out! Higher rank is currently better! This makes sense if we use this is a similarity metric or a
        # pseudo term count, but doesn't match the rest of the codebase, where distances are used (lower is better)
        ddf = df.groupby('neigh').aggregate({'rank': 'mean',
                                             'sim': 'mean',
                                             'tid': 'count'}).rename(columns={'tid': 'contained_in',
                                                                              'rank': 'mean_rank',
                                                                              'sim': 'mean_dist'})
        # DataFrame.sort() was removed in pandas 0.20; sort_values is the
        # drop-in replacement with the same by/ascending/kind parameters.
        ddf = ddf.sort_values(['contained_in', 'mean_rank', 'mean_dist'],
                              ascending=[False, True, True], kind='mergesort')  # must be stable
        a = 1. + ddf.mean_rank.values
        ddf.mean_rank = 1 / a
        return list(zip(ddf.index, ddf.mean_rank) if len(ddf) else [])
class KmeansVectorizer(ThesaurusVectorizer):
    """Vectorizer that maps each feature to its k-means cluster id column."""
    def fit_transform(self, raw_documents, y=None, clusters=None, **kwargs):
        """Fit/transform; *clusters* (feature -> cluster id table) is required."""
        if clusters is None:
            raise ValueError('Need a clusters file to fit this model')
        self.clusters = clusters
        return super().fit_transform(raw_documents, y=y, **kwargs)
    def _process_single_feature(self, feature, j_indices, values, vocabulary):
        try:
            # insert cluster number of this features as its column number
            # The .ix indexer was removed from pandas; .loc keeps the
            # label-based row lookup and .iloc[0] the positional access
            # to the first column that `[0]` performed before.
            cluster_id = self.clusters.loc[str(feature)].iloc[0]
            feature_index_in_vocab = vocabulary['cluster%d' % cluster_id]
            j_indices.append(feature_index_in_vocab)
            values.append(1)
        except KeyError:
            # the feature is not contained in the distributional model, ignore it
            pass
        except IndexError:
            logging.warning('IndexError for feature %r', feature)
"repo_name": "mbatchkarov/dc_evaluation",
"path": "eval/pipeline/multivectors.py",
"copies": "1",
"size": "3155",
"license": "bsd-3-clause",
"hash": -2026673979519361500,
"line_mean": 42.2328767123,
"line_max": 115,
"alpha_frac": 0.5635499208,
"autogenerated": false,
"ratio": 4.162269129287599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225819050087599,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import logging
import requests
import urllib
from eva_cttv_pipeline.trait_mapping.utils import json_request
# Base URL of EBI's Ontology Lookup Service, used by ols_efo_query() below.
OLS_EFO_SERVER = 'https://www.ebi.ac.uk/ols'
# The setting for local OLS installation should be uncommented if necessary. Note that the link
# for the local deployment is different from the production link in three regards: (1) it must use
# HTTP instead of HTTPS; (2) it must include the port which you used when deploying the Docker
# container; (3) it does *not* include /ols in its path.
# OLS_EFO_SERVER = 'http://127.0.0.1:8080'
logger = logging.getLogger(__package__)
def build_ols_query(ontology_uri: str) -> str:
    """Build a url to query OLS for a given ontology uri."""
    return f"https://www.ebi.ac.uk/ols/api/terms?iri={ontology_uri}"
@lru_cache(maxsize=16384)
def get_ontology_label_from_ols(ontology_uri: str) -> str:
    """
    Using provided ontology URI, build an OLS URL with which to make a request to find the term label for this URI.
    :param ontology_uri: A URI for a term in an ontology.
    :return: Term label for the ontology URI provided in the parameters, or None when the term
        cannot be resolved (despite the ``str`` annotation, None is a possible return value).
    """
    url = build_ols_query(ontology_uri)
    json_response = json_request(url)
    if not json_response:
        return None
    # If the '_embedded' section is missing from the response, it means that the term is not found in OLS
    if '_embedded' not in json_response:
        # presumably medgen/omim terms are expected to be absent from OLS,
        # so the warning is suppressed for them — TODO confirm
        if '/medgen/' not in url and '/omim/' not in url:
            logger.warning('OLS queried OK but did not return any results for URL {}'.format(url))
        return None
    # Go through all terms found by the requested identifier and try to find the one where the _identifier_ and the
    # _term_ come from the same ontology (marked by a special flag). Example of such a situation would be a MONDO term
    # in the MONDO ontology. Example of a reverse situation is a MONDO term in EFO ontology (being *imported* into it
    # at some point).
    for term in json_response["_embedded"]["terms"]:
        if term["is_defining_ontology"]:
            return term["label"]
    if '/medgen/' not in url and '/omim/' not in url:
        logger.warning('OLS queried OK, but there is no defining ontology in its results for URL {}'.format(url))
    return None
def double_encode_uri(uri: str) -> str:
    """Double encode a given uri."""
    encoded_once = urllib.parse.quote(uri, safe="")
    return urllib.parse.quote(encoded_once, safe="")
def ols_efo_query(uri: str) -> requests.Response:
    """
    Query EFO using OLS for a given ontology uri, returning the response from the request.
    :param uri: Ontology uri to use in querying EFO using OLS
    :return: Response from OLS
    """
    encoded = double_encode_uri(uri)
    url = "{}/api/ontologies/efo/terms/{}".format(OLS_EFO_SERVER, encoded)
    return requests.get(url)
@lru_cache(maxsize=16384)
def is_current_and_in_efo(uri: str) -> bool:
    """
    Checks whether given ontology uri is a valid and non-obsolete term in EFO.
    :param uri: Ontology uri to use in querying EFO using OLS
    :return: Boolean value, true if ontology uri is valid and non-obsolete term in EFO
    """
    response = ols_efo_query(uri)
    if response.status_code == 200:
        return not response.json()["is_obsolete"]
    return False
@lru_cache(maxsize=16384)
def is_in_efo(uri: str) -> bool:
    """
    Checks whether given ontology uri is a valid term in EFO.
    :param uri: Ontology uri to use in querying EFO using OLS
    :return: Boolean value, true if ontology uri is a valid term in EFO
    """
    # Unlike is_current_and_in_efo, obsolete terms also pass here: any
    # HTTP 200 response means the term exists in EFO.
    response = ols_efo_query(uri)
    return response.status_code == 200
| {
"repo_name": "EBIvariation/eva-cttv-pipeline",
"path": "eva_cttv_pipeline/trait_mapping/ols.py",
"copies": "1",
"size": "3709",
"license": "apache-2.0",
"hash": 5185457994106447000,
"line_mean": 36.8469387755,
"line_max": 118,
"alpha_frac": 0.6872472365,
"autogenerated": false,
"ratio": 3.5357483317445184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9715892893025567,
"avg_score": 0.001420535043790126,
"num_lines": 98
} |
from functools import lru_cache
import logging
import warnings
import civis
from civis.resources import generate_classes_maybe_cached
from civis._utils import get_api_key
from civis._deprecation import deprecate_param
log = logging.getLogger(__name__)
# HTTP status codes and verbs considered retryable; presumably consumed
# by the session/retry layer — not referenced in this module's visible code.
RETRY_CODES = [429, 502, 503, 504]
RETRY_VERBS = ['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE']
POST_RETRY_CODES = [429, 503]
def find(object_list, filter_func=None, **kwargs):
    """Filter :class:`civis.response.Response` objects.
    Parameters
    ----------
    object_list : iterable
        Arbitrary objects, typically :class:`civis.response.Response`
        instances, whose attributes are matched against the filters.
    filter_func : callable, optional
        A one-argument predicate. If given, `kwargs` are ignored and an
        object is kept iff ``bool(filter_func(object))`` is True.
    **kwargs
        Attribute filters (string keys only). An object is kept when every
        `key` is one of its attributes and, for each `key`, one of:
        - `value` is callable and ``bool(value(getattr(object, key)))``
        - `value` is ``True``
        - ``getattr(object, key)`` equals `value`
    Returns
    -------
    list
    See Also
    --------
    civis.find_one
    """
    def attribute_filter(candidate):
        # Every kwarg condition must hold for the candidate to be kept.
        for attr_name, expected in kwargs.items():
            if not hasattr(candidate, attr_name):
                return False
            elif callable(expected):
                if not expected(getattr(candidate, attr_name, None)):
                    return False
            elif isinstance(expected, bool):
                if hasattr(candidate, attr_name) != expected:
                    return False
            elif expected != getattr(candidate, attr_name, None):
                return False
        return True

    predicate = filter_func if filter_func else attribute_filter
    return [candidate for candidate in object_list if predicate(candidate)]
def find_one(object_list, filter_func=None, **kwargs):
    """Return one satisfying :class:`civis.response.Response` object.
    Accepts the same arguments as :func:`civis.find`. When several objects
    satisfy the filter, the first one is returned; when none do, ``None``.
    Returns
    -------
    object or None
    See Also
    --------
    civis.find
    """
    for match in find(object_list, filter_func, **kwargs):
        return match
    return None
class MetaMixin():
    """Memoized ID-lookup helpers mixed into :class:`APIClient`.
    NOTE(review): lru_cache on instance methods keys each cache entry on
    `self`, so cached results are per-client and every cached client
    instance is kept alive for the cache's lifetime. The same applies to
    the ``@property`` + ``@lru_cache`` stacks at the bottom of the class.
    """
    @lru_cache(maxsize=128)
    def get_database_id(self, database):
        """Return the database ID for a given database name.
        Parameters
        ----------
        database : str or int
            If an integer ID is given, passes through. If a str is given
            the database ID corresponding to that database name is returned.
        Returns
        -------
        database_id : int
            The ID of the database.
        Raises
        ------
        ValueError
            If the database can't be found.
        """
        if isinstance(database, int):
            return database
        db = find_one(self.databases.list(), name=database)
        if not db:
            raise ValueError("Database {} not found.".format(database))
        return db["id"]
    @lru_cache(maxsize=128)
    def get_database_credential_id(self, username, database_name):
        """Return the credential ID for a given username in a given database.
        Parameters
        ----------
        username : str or int
            If an integer ID is given, this passes through directly. If a
            str is given, return the ID corresponding to the database
            credential with that username.
        database_name : str or int
            Return the ID of the database credential with username
            `username` for this database name or ID.
        Returns
        -------
        database_credential_id : int
            The ID of the database credentials.
        Raises
        ------
        ValueError
            If the credential can't be found.
        Examples
        --------
        >>> import civis
        >>> client = civis.APIClient()
        >>> client.get_database_credential_id('jsmith', 'redshift-general')
        1234
        >>> client.get_database_credential_id(1111, 'redshift-general')
        1111
        """
        if isinstance(username, int):
            return username
        else:
            creds = self.credentials.list(type="Database")
            filter_kwargs = {'username': username}
            if isinstance(database_name, int):
                filter_kwargs['remote_host_id'] = database_name
            else:
                filter_kwargs['remote_host_name'] = database_name
            my_creds = find_one(creds, **filter_kwargs)
            if my_creds is None:
                raise ValueError("Credential ID for {} on {} not "
                                 "found.".format(username, database_name))
            return my_creds["id"]
    @lru_cache(maxsize=128)
    def get_aws_credential_id(self, cred_name, owner=None):
        """Find an AWS credential ID.
        Parameters
        ----------
        cred_name : str or int
            If an integer ID is given, this passes through directly. If a
            str is given, return the ID corresponding to the AWS credential
            with that name.
        owner : str, optional
            Return the credential with this owner. If not provided, search
            for credentials under your username to disambiguate multiple
            credentials with the same name. Note that this function cannot
            return credentials which are not associated with an owner.
        Returns
        -------
        aws_credential_id : int
            The ID number of the AWS credentials.
        Raises
        ------
        ValueError
            If the AWS credential can't be found.
        Examples
        --------
        >>> import civis
        >>> client = civis.APIClient()
        >>> client.get_aws_credential_id('jsmith')
        1234
        >>> client.get_aws_credential_id(1111)
        1111
        >>> client.get_aws_credential_id('shared-cred',
        ...                              owner='research-group')
        99
        """
        if isinstance(cred_name, int):
            return cred_name
        else:
            creds = self.credentials.list(type="Amazon Web Services S3")
            my_creds = find(creds, name=cred_name)
            if owner is not None:
                my_creds = find(my_creds, owner=owner)
            if not my_creds:
                own_str = "" if owner is None else " owned by {}".format(owner)
                msg = "AWS credential ID for {}{} cannot be found"
                raise ValueError(msg.format(cred_name, own_str))
            elif len(my_creds) > 1:
                if owner is None:
                    # If the user didn't specify an owner, see if we can
                    # narrow down to just credentials owned by this user.
                    owner = self.username
                    my_creds = find(my_creds, owner=owner)
                if len(my_creds) > 1:
                    log.warning("Found %d AWS credentials with name %s and "
                                "owner %s. Returning the first.",
                                len(my_creds), cred_name, owner)
            my_creds = my_creds[0]
            return my_creds["id"]
    @lru_cache(maxsize=128)
    def get_table_id(self, table, database):
        """Return the table ID for a given database and table name.
        Parameters
        ----------
        table : str
            The name of the table in format schema.tablename.
            Either schema or tablename, or both, can be double-quoted to
            correctly parse special characters (such as '.').
        database : str or int
            The name or ID of the database.
        Returns
        -------
        table_id : int
            The ID of the table.
        Raises
        ------
        ValueError
            If a table match can't be found.
        Examples
        --------
        >>> import civis
        >>> client = civis.APIClient()
        >>> client.get_table_id('foo.bar', 'redshift-general')
        123
        >>> client.get_table_id('"schema.has.periods".bar', 'redshift-general')
        456
        """
        database_id = self.get_database_id(database)
        schema, name = civis.io.split_schema_tablename(table)
        tables = self.tables.list(database_id=database_id, schema=schema,
                                  name=name)
        if not tables:
            msg = "No tables found for {} in database {}"
            raise ValueError(msg.format(table, database))
        return tables[0].id
    @lru_cache(maxsize=128)
    def get_storage_host_id(self, storage_host):
        """Return the storage host ID for a given storage host name.
        Parameters
        ----------
        storage_host : str or int
            If an integer ID is given, passes through. If a str is given
            the storage host ID corresponding to that storage host is returned.
        Returns
        -------
        storage_host_id : int
            The ID of the storage host.
        Raises
        ------
        ValueError
            If the storage host can't be found.
        Examples
        --------
        >>> import civis
        >>> client = civis.APIClient()
        >>> client.get_storage_host_id('test host')
        1234
        >>> client.get_storage_host_id(1111)
        1111
        """
        if isinstance(storage_host, int):
            return storage_host
        sh = find_one(self.storage_hosts.list(), name=storage_host)
        if not sh:
            raise ValueError("Storage Host {} not found.".format(storage_host))
        return sh["id"]
    @property
    @lru_cache(maxsize=128)
    def default_credential(self):
        """The current user's default credential."""
        # NOTE: this should be optional to endpoints...so this could go away
        creds = self.credentials.list(default=True)
        return creds[0]['id'] if len(creds) > 0 else None
    @property
    @lru_cache(maxsize=128)
    def username(self):
        """The current user's username."""
        return self.users.list_me().username
class APIClient(MetaMixin):
    """The Civis API client.
    Parameters
    ----------
    api_key : str, optional
        Your API key obtained from the Civis Platform. If not given, the
        client will use the :envvar:`CIVIS_API_KEY` environment variable.
    return_type : str, optional
        The following types are implemented:
        - ``'raw'`` Returns the raw :class:`requests:requests.Response` object.
        - ``'snake'`` Returns a :class:`civis.response.Response` object for the
          json-encoded content of a response. This maps the top-level json
          keys to snake_case.
        - ``'pandas'`` Returns a :class:`pandas:pandas.DataFrame` for
          list-like responses and a :class:`pandas:pandas.Series` for single a
          json response.
    retry_total : int, optional
        A number indicating the maximum number of retries for 429, 502, 503, or
        504 errors.
    api_version : string, optional
        The version of endpoints to call. May instantiate multiple client
        objects with different versions. Currently only "1.0" is supported.
    resources : string, optional
        When set to "base", only the default endpoints will be exposed in the
        client object. Set to "all" to include all endpoints available for
        a given user, including those that may be in development and subject
        to breaking changes at a later date. This will be removed in a future
        version of the API client.
    local_api_spec : collections.OrderedDict or string, optional
        The methods on this class are dynamically built from the Civis API
        specification, which can be retrieved from the /endpoints endpoint.
        When local_api_spec is None, the default, this specification is
        downloaded the first time APIClient is instantiated. Alternatively,
        a local cache of the specification may be passed as either an
        OrderedDict or a filename which points to a json file.
    """
    @deprecate_param('v2.0.0', 'resources')
    def __init__(self, api_key=None, return_type='snake',
                 retry_total=6, api_version="1.0", resources="all",
                 local_api_spec=None):
        if return_type not in ['snake', 'raw', 'pandas']:
            raise ValueError("Return type must be one of 'snake', 'raw', "
                             "'pandas'")
        self._feature_flags = ()
        session_auth_key = get_api_key(api_key)
        self._session_kwargs = {'api_key': session_auth_key}
        self.last_response = None
        # Catch deprecation warnings from generate_classes_maybe_cached and
        # the functions it calls until the `resources` argument is removed.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                category=FutureWarning,
                module='civis')
            # Endpoint classes are generated from the API spec and attached
            # to this instance as attributes (e.g. client.databases).
            classes = generate_classes_maybe_cached(local_api_spec,
                                                    session_auth_key,
                                                    api_version,
                                                    resources)
            for class_name, cls in classes.items():
                setattr(self, class_name, cls(self._session_kwargs, client=self,
                                              return_type=return_type))
    @property
    def feature_flags(self):
        """Names of the feature flags enabled for the current user
        (fetched lazily on first access, then memoized on the instance)."""
        if self._feature_flags:
            return self._feature_flags
        me = self.users.list_me()
        self._feature_flags = tuple(flag for flag, value
                                    in me['feature_flags'].items() if value)
        return self._feature_flags
    def __getstate__(self):
        # The client holds live session state; pickling it is unsupported.
        raise RuntimeError("The APIClient object can't be pickled.")
| {
"repo_name": "civisanalytics/civis-python",
"path": "civis/civis.py",
"copies": "1",
"size": "14803",
"license": "bsd-3-clause",
"hash": -5328278124501232000,
"line_mean": 34.3293556086,
"line_max": 79,
"alpha_frac": 0.5673174357,
"autogenerated": false,
"ratio": 4.5241442542787285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 419
} |
from functools import lru_cache
import math
class Solution:
    """Top-down memoized DP for LeetCode 1000 (Minimum Cost to Merge Stones)."""
    def mergeStones(self, stones, K: int) -> int:
        n = len(stones)
        # Each merge replaces K piles with 1, removing K-1 piles; reaching
        # a single pile is possible only when (n-1) divides by (K-1).
        if (n - 1) % (K - 1):
            return -1
        prefix = [0]
        for stone in stones:
            prefix.append(prefix[-1] + stone)
        @lru_cache(None)
        def best(lo, hi, piles):
            # Minimum cost to turn stones[lo..hi] into `piles` piles.
            if hi - lo + 1 == piles:
                return 0
            if piles == 1:
                return best(lo, hi, K) + prefix[hi + 1] - prefix[lo]
            return min(best(lo, mid, 1) + best(mid + 1, hi, piles - 1)
                       for mid in range(lo, hi, K - 1))
        return best(0, n - 1, 1)
class Solution2:
    """Bottom-up interval DP variant of the merge-stones problem."""
    def mergeStones(self, stones, K: int) -> int:
        n = len(stones)
        if (n - 1) % (K - 1):
            return -1
        prefix = [0]
        for stone in stones:
            prefix.append(prefix[-1] + stone)
        # dp[lo][hi]: minimum cost to merge stones[lo..hi] as far as possible.
        dp = [[0] * n for _ in range(n)]
        for length in range(K, n + 1):
            for lo in range(n - length + 1):
                hi = lo + length - 1
                cheapest = math.inf
                for mid in range(lo, hi, K - 1):
                    cheapest = min(cheapest, dp[lo][mid] + dp[mid + 1][hi])
                if (hi - lo) % (K - 1) == 0:
                    # This interval collapses to one pile; pay its total weight.
                    cheapest += prefix[hi + 1] - prefix[lo]
                dp[lo][hi] = cheapest
        return dp[0][-1]
| {
"repo_name": "jiadaizhao/LeetCode",
"path": "0901-1000/1000-Minimum Cost to Merge Stones/1000-Minimum Cost to Merge Stones.py",
"copies": "1",
"size": "1455",
"license": "mit",
"hash": -7542997399918738000,
"line_mean": 33.6428571429,
"line_max": 105,
"alpha_frac": 0.4652920962,
"autogenerated": false,
"ratio": 3.1357758620689653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9085736218487739,
"avg_score": 0.0030663479562453183,
"num_lines": 42
} |
from functools import lru_cache
import math
from skimage.transform import resize
from rectpack import newPacker, SORT_AREA
from rectpack.guillotine import GuillotineBafSlas
import numpy as np
import pandas as pd
from ifcb.data.adc import SCHEMA_VERSION_1
from ifcb.data.stitching import InfilledImages
class Mosaic(object):
    """Pack scaled ROI images from an IFCB bin onto fixed-size mosaic pages."""
    def __init__(self, the_bin, shape=(600, 800), scale=0.33, bg_color=200, coordinates=None):
        self.bin = the_bin
        self.shape = shape  # (height, width) of one mosaic page in pixels
        self.bg_color = bg_color  # grayscale background fill value
        self.scale = scale  # scale factor applied to each ROI
        self.coordinates = coordinates  # optional precomputed layout DataFrame
    @lru_cache()
    def _shapes(self):
        """Return (scaled_height, scaled_width, target_number) tuples.
        Returns a list rather than a lazy zip: lru_cache would otherwise
        memoize an iterator that is exhausted after its first traversal,
        making every later call silently yield nothing.
        """
        hs, ws, ix = [], [], []
        with self.bin:
            ii = InfilledImages(self.bin)
            for target_number in ii:
                h, w = ii.shape(target_number)
                hs.append(math.floor(h * self.scale))
                ws.append(math.floor(w * self.scale))
                ix.append(target_number)
        return list(zip(hs, ws, ix))
    def pack(self, max_pages=20):
        """Compute (and cache on the instance) the rectangle layout.
        :param max_pages: Maximum number of mosaic pages to pack into.
        :return: DataFrame with columns page, y, x, h, w, roi_number.
        """
        if self.coordinates is not None:
            return self.coordinates
        page_h, page_w = self.shape
        pages = [(page_h - 1, page_w - 1) for _ in range(max_pages)]
        packer = newPacker(sort_algo=SORT_AREA, rotation=False, pack_algo=GuillotineBafSlas)
        for r in self._shapes():
            packer.add_rect(*r)
        for p in pages:
            packer.add_bin(*p)
        packer.pack()
        COLS = ['page', 'y', 'x', 'h', 'w', 'roi_number']
        self.coordinates = pd.DataFrame(packer.rect_list(), columns=COLS)
        return self.coordinates
    def page(self, page=0):
        """Render mosaic page number *page* as a uint8 grayscale array."""
        df = self.pack()
        page_h, page_w = self.shape
        page_image = np.zeros((page_h, page_w), dtype=np.uint8) + self.bg_color
        sdf = df[df.page == page]
        with self.bin:
            # schema v1 bins may need stitched/infilled multi-part images
            if self.bin.schema == SCHEMA_VERSION_1:
                ii = InfilledImages(self.bin)
            else:
                ii = self.bin.images
            for index, row in sdf.iterrows():
                y, x = row.y, row.x
                h, w = row.h, row.w
                unscaled_image = ii[row.roi_number]
                scaled_image = resize(unscaled_image, (h, w), mode='reflect', preserve_range=True)
                page_image[y:y+h, x:x+w] = scaled_image
        return page_image
"repo_name": "joefutrelle/pyifcb",
"path": "ifcb/viz/mosaic.py",
"copies": "1",
"size": "2322",
"license": "mit",
"hash": 4971968666164284000,
"line_mean": 35.873015873,
"line_max": 98,
"alpha_frac": 0.5684754522,
"autogenerated": false,
"ratio": 3.3997071742313323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44681826264313323,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import numpy as np
from menpo3d.correspond import non_rigid_icp
from .data import load_template
def smootherstep(x, x_min, x_max):
    """Perlin's smootherstep ramp: 0 at/below x_min, 1 at/above x_max,
    with zero first and second derivatives at both ends."""
    t = np.clip((x - x_min) / (x_max - x_min), 0, 1)
    return 6 * (t ** 5) - 15 * (t ** 4) + 10 * (t ** 3)
def generate_data_weights(template, nosetip, r_mid=1.05, r_width=0.3,
                          y_pen=1.4, w_inner=1, w_outer=0):
    """Per-vertex data weights that fall off smoothly with (y-penalised)
    distance from the nose tip: w_inner inside r_mid - r_width/2,
    w_outer beyond r_mid + r_width/2."""
    inner_radius = r_mid - (r_width / 2)
    outer_radius = r_mid + (r_width / 2)
    axis_scale = np.array([1, y_pen, 1])
    sq_dist = np.sum((template.points - nosetip.points) ** 2 * axis_scale,
                     axis=1)
    dist = np.sqrt(sq_dist)
    falloff = 1 - smootherstep(dist, inner_radius, outer_radius)
    span = w_inner - w_outer
    return (falloff[:, None] * span + w_outer).T
def generate_data_weights_per_iter(template, nosetip, r_width, w_min_iter,
                                   w_max_iter, r_mid=10.5, y_pen=1.4):
    """Data weights whose outer value decays per iteration, tracking the
    stiffness schedule linearly from w_max_iter down to w_min_iter."""
    # Change in the data term follows the same pattern that is used for the
    # stiffness weights
    stiffness_weights = np.array([50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2])
    s_iter_range = stiffness_weights[0] - stiffness_weights[-1]
    w_iter_range = w_max_iter - w_min_iter
    # Linear map stiffness -> outer weight: slope * s + intercept.
    slope = w_iter_range / s_iter_range
    intercept = w_max_iter - slope * stiffness_weights[0]
    w_outer = slope * stiffness_weights + intercept
    return generate_data_weights(template, nosetip, w_inner=1,
                                 w_outer=w_outer, r_width=r_width,
                                 r_mid=r_mid, y_pen=y_pen)
@lru_cache()
def data_weights():
    """Memoized per-iteration NICP data weights for the LSFM template."""
    template = load_template()
    unit = 0.84716526594210229  # template-specific radial scale factor
    return generate_data_weights_per_iter(
        template,
        template.landmarks['nosetip'],
        r_width=0.5 * unit,
        r_mid=0.95 * unit,
        w_min_iter=0.0,
        w_max_iter=0.5,
        y_pen=1.7,
    )
def correspond_mesh(mesh, mask=None, verbose=False):
    """Bring *mesh* into correspondence with the LSFM template via NICP.
    When *mask* is given, landmarks flagged False are excluded from the
    NICP landmark term (e.g. occluded points)."""
    template = load_template().copy()
    group = '__lsfm'
    if mask is not None:
        group = '__lsfm_masked'
        template.landmarks[group] = template.landmarks['__lsfm'].from_mask(mask)
        if verbose:
            n_bad_lms = (~mask).sum()
            if n_bad_lms != 0:
                print('occlusion mask provided with {} False values - '
                      'omitting these landmarks from NICP '
                      'calculation'.format(n_bad_lms))
    return non_rigid_icp(template, mesh, landmark_group=group,
                         data_weights=data_weights(), verbose=verbose)
| {
"repo_name": "menpo/lsfm",
"path": "lsfm/correspond.py",
"copies": "1",
"size": "2864",
"license": "bsd-3-clause",
"hash": 3206032790369035000,
"line_mean": 38.7777777778,
"line_max": 79,
"alpha_frac": 0.5101256983,
"autogenerated": false,
"ratio": 3.269406392694064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42795320909940643,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import os
from subprocess import call, DEVNULL
from time import sleep
# Only available on MacOS
# Install with `pip install pyobj`
from AppKit import NSScreen
from . import BaseSystem
class System(BaseSystem):
    """MacOS implementation: Chrome location and connected display geometry
    (relies on PyObjC's AppKit bindings)."""
    @property
    def browser_path(self):
        # Standard install location of Google Chrome on MacOS.
        return os.path.join('/', 'Applications', 'Google Chrome.app', 'Contents', 'MacOS', 'Google Chrome')
    @property
    @lru_cache()
    def displays(self):
        # NOTE(review): lru_cache on a property method keys on `self` and
        # keeps each System instance alive for the cache's lifetime.
        screens = NSScreen.screens()
        connected = []
        for idx, screen in enumerate(screens):
            screen = screen.frame()
            origin_y = screen.origin.y
            # Flip coordinate space because Apple is weird
            # https://developer.apple.com/documentation/coregraphics/cgrect
            # (AppKit's origin is bottom-left; secondary screens are
            # re-expressed relative to the first screen's y origin)
            if len(connected) > 0:
                origin_y = -screen.size.height - (origin_y - connected[0]["y"])
            connected.append({
                "id": idx,
                "width": int(screen.size.width),
                "height": int(screen.size.height),
                "x": int(screen.origin.x),
                "y": int(origin_y)
            })
        return connected
| {
"repo_name": "foxxyz/multibrowse",
"path": "systems/mac.py",
"copies": "1",
"size": "1161",
"license": "mit",
"hash": -2261566442469482000,
"line_mean": 29.5526315789,
"line_max": 107,
"alpha_frac": 0.5831180017,
"autogenerated": false,
"ratio": 4.088028169014085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171146170714085,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import os
from subprocess import check_output, CalledProcessError, STDOUT
def _check(path):
if type(path) is not str:
raise ValueError(
"path must be specified as str (actual: {})".format(type(path)))
if not os.path.exists(path):
raise FileNotFoundError(
"no such file or directory: {}".format(path))
return True
def _split(path):
if os.path.isdir(path):
dir, file = path.rstrip(os.path.sep), None
else:
dir, file = os.path.split(path)
return dir, file
def _set_cwd(d, path=None, check=True):
    """Store the directory component of *path* into d['cwd'], if any."""
    if path is None:
        return
    if check:
        _check(path)
    directory = _split(path)[0]
    if directory != '':
        d['cwd'] = directory
def _exec(command, suppress_error=True, **kwargs):
try:
output = check_output(command, stderr=STDOUT, shell=False, **kwargs)
except CalledProcessError as exc:
if not suppress_error:
raise OSError(exc.output.decode('utf-8'))
return None
return output.decode('utf-8').strip()
@lru_cache(maxsize=1)
def root(path=None, suppress_error=True):
    """Return the git repository top-level directory for *path* (memoized)."""
    kwargs = {}
    _set_cwd(kwargs, path)
    return _exec(['git', 'rev-parse', '--show-toplevel'],
                 suppress_error, **kwargs)
@lru_cache(maxsize=1)
def hash(path=None, short=False, suppress_error=True):
    """Return the current HEAD commit hash, abbreviated when *short*.
    (Shadows the builtin hash(); kept for API compatibility.)"""
    flag = '--short' if short else '--verify'
    kwargs = {}
    _set_cwd(kwargs, path)
    return _exec(['git', 'rev-parse', flag, 'HEAD'], suppress_error, **kwargs)
@lru_cache(maxsize=1)
def relpath(path=None, suppress_error=True):
    """Return *path* expressed relative to the git repository root."""
    kwargs = {}
    _set_cwd(kwargs, path)
    file = _split(path)[1] if path else None
    prefix = _exec(['git', 'rev-parse', '--show-prefix'],
                   suppress_error, **kwargs)
    if prefix is not None and file is not None:
        prefix = os.path.join(prefix, file)
    return prefix
| {
"repo_name": "chantera/teras",
"path": "teras/utils/git.py",
"copies": "1",
"size": "1945",
"license": "mit",
"hash": 299325095899491800,
"line_mean": 27.1884057971,
"line_max": 76,
"alpha_frac": 0.6107969152,
"autogenerated": false,
"ratio": 3.5108303249097474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9621627240109747,
"avg_score": 0,
"num_lines": 69
} |
from functools import lru_cache
import os
import glob
import copy
from itertools import product
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.interpolate import interp1d,RegularGridInterpolator,\
UnivariateSpline
import astropy.units as u
from astropy.io import fits
from astropy.table import Table
from . import convolve
from . import fitting
from . import photometry
from . import spectrum
from . import filter
from . import utils
from . import config as cfg
class Model(object):
    """Basic model class.
    PhotModel and SpecModel are derived from this, the only real
    difference is that the former has convolved flux data for an array
    of filters, and the latter has flux data for an array of
    wavelengths.
    Many of the methods and functions required are similar for both, so
    are contained in the base class with checking to ensure the right
    thing is done. """
    # NOTE: no 'self' argument; called as Model.read_file(file), i.e.
    # used as a plain function reached through the class. The lru_cache
    # means repeated reads of the same file return the SAME object, so
    # callers must copy() before mutating.
    @lru_cache(maxsize=32)
    def read_file(file):
        """Read a model file.

        The SDFTYPE primary-header keyword decides whether a PhotModel
        or SpecModel is instantiated; HDU 1 holds the flux cube, HDU 2
        the filter names or wavelengths, and subsequent HDUs the
        parameter value grids.
        """
        # parameter names
        fh = fits.open(file)
        keywords = fh[0].header
        nparam = keywords['NPARAM']
        # see what type of model we have (local shadows builtin 'type')
        type = keywords['SDFTYPE']
        if type == 'PhotModel':
            self = PhotModel()
            # get the filter names
            dat = fh[2].data
            fs = np.array(dat,dtype=str)
            for i in range(len(fs)):
                fs[i] = fs[i].strip()
            self.filters = np.array(fs)
        elif type == 'SpecModel':
            self = SpecModel()
            # get the wavelengths
            dat = fh[2].data
            self.wavelength = np.array(dat,dtype=dat.dtype[0])
        else:
            raise utils.SdfError("file {} not a PhotModel or SpecModel,\
                                 is a {}".format(file,type))
        self.name = keywords['NAME']
        self.parameters = [keywords['PARAM'+str(i)] for i in range(nparam)]
        # parameter ranges, assume that all values have the same dtype
        # so it's OK if the resulting ndarray does
        d = {}
        for i,par in enumerate(self.parameters):
            # parameter HDUs start at index 3 (after primary/cube/filters)
            j = i+3
            if str.upper(par) != fh[j].name:
                raise utils.SdfError("{}th parameter {} not equal to HDU\
                                     with name {}".format(j,par,fh[j].name))
            dat = fh[j].data
            d[par] = np.array(dat,dtype=dat.dtype[0])
        self.param_values = d
        # main array of convolved model fluxes, do this last so the
        # dimensionality is checked by the setter
        self.fnujy_sr = fh[1].data
        header1 = fh[1].header
        # rescale to Jy/sr if the file recorded some other flux unit
        if header1['BUNIT'] != (u.jansky/u.sr).to_string(format='fits'):
            fnuunit = header1['BUNIT']
            self.fnujy_sr = self.fnujy_sr * u.Unit(fnuunit).to('Jy / sr')
        # integer indices of filters/wavelengths
        if type == 'PhotModel':
            self.i = np.arange(len(self.filters))
        elif type == 'SpecModel':
            self.i = np.arange(len(self.wavelength))
        self.n_i = len(self.i)
        # create the hashed version
        self.fill_log_fnujy_sr_hashed()
        return self
    def write_file(self,file,overwrite=False):
        """Write model to a FITS file.
        Number of parameters and their names are stored in the primary
        HDU, flux density cube is in the first HDU, and arrays with the
        filter names and parameter ranges in subsequent HDUs.
        """
        # primary HDU (for metadata)
        hdu0 = fits.PrimaryHDU()
        hdu0.header['NAME'] = self.name
        hdu0.header['NPARAM'] = len(self.parameters)
        # keywords for parameters
        for i,par in enumerate(self.parameters):
            hdu0.header['PARAM'+str(i)] = par
        # fluxes
        hdu1 = fits.ImageHDU(self.fnujy_sr, name='MODELS')
        hdu1.header['BUNIT'] = (u.jansky/u.sr).to_string(format='fits')
        # see what type of model we're writing
        if isinstance(self,PhotModel):
            hdu0.header['SDFTYPE'] = 'PhotModel'
            # filter names
            t = Table()
            t.add_column( Table.Column(self.filters,name='FILTERS') )
            hdu2 = fits.BinTableHDU(np.array(t),name='FILTERS')
        elif isinstance(self,SpecModel):
            hdu0.header['SDFTYPE'] = 'SpecModel'
            # Wavelengths
            t = Table()
            t.add_column( Table.Column(self.wavelength,name='WAVLNTHS') )
            hdu2 = fits.BinTableHDU(np.array(t),name='WAVLNTHS')
        # parameter ranges
        hdup = []
        for par in self.parameters:
            t = Table()
            t.add_column( Table.Column(self.param_values[par],
                                       name=str.upper(par)) )
            hdup.append( fits.BinTableHDU(np.array(t),name=str.upper(par)) )
        hdus = [hdu0,hdu1,hdu2]
        hdus.extend(hdup)
        hdulist = fits.HDUList(hdus)
        hdulist.writeto(file, overwrite=overwrite)
    def fill_log_fnujy_sr_hashed(self):
        """Get a hashed copy of log10 fnujy_sr."""
        # log10 just the positive values, others have tiny value
        # using the out keyword avoids a bug in numpy 1.13.0
        pos = self.fnujy_sr > 0.0
        tmp = np.log10(self.fnujy_sr,where=pos,
                       out=np.zeros(self.fnujy_sr.shape)+np.log10(cfg.tiny))
        self.log_fnujy_sr_hashed = utils.hashable(tmp)
    # NOTE(review): lru_cache on an instance method keys on self and
    # keeps every cached instance alive for the cache's lifetime.
    @lru_cache(maxsize=8)
    def rginterpolator(self):
        """Return a regular grid interpolator.
        Memoizing doesn't appear to save any time.
        This was the chunk of code in fnujy below:
        # scipy.RegularGridInterpolator, save a bit of time since the
        # interpolator object is the same for each model. first we
        # create grid points we want, each row is just the different
        # filter numbers assigned above with the parameters appended
        # pargrid = np.tile(par,len(wave_arr)).reshape((len(wave_arr),len(par)))
        # pargrid = np.insert(pargrid,0,wave_arr,axis=1)
        # f = self.rginterpolator()
        # fluxes = f(pargrid)
        """
        if isinstance(self,PhotModel):
            wave_arr = np.arange(len(self.filters))
        elif isinstance(self,SpecModel):
            wave_arr = np.arange(len(self.wavelength))
        points = (wave_arr,)
        for param in self.parameters:
            points += (self.param_values[param],)
        f = RegularGridInterpolator(points,self.fnujy_sr,
                                    bounds_error=False,fill_value=np.inf)
        return f
    def fnujy(self,param):
        """Return fluxes for a model with a specific solid angle
        Parameter in addition to those specificed for the model is the
        log10 of the area in steradian in units of Solar radii at 1pc,
        appended.
        This is spline interpolation. This doesn't matter too much since
        any high dynamic range parameters are already log spaced.
        TODO: this is the core of the sdf code in terms of execution
        time. Experiments so far find that map_coordinates is faster
        than RegularGridInterpolator, but is hindered somewhat by the
        need to do interpolation first to find the grid points needed by
        map_coordinates. np.interp seems to be faster than scipy
        UnivariateSpline or simple 1pt interpolation for this step.
        """
        # prepend this to the interpolation to return the results at all
        # filters/wavelengths
        wave_arr = self.i
        nwav = self.n_i
        # make sure par is a numpy array
        par_len = len(param)-1
        # last element of param is log10 solid angle (normalisation)
        area_sr = cfg.ssr * 10**( param[-1] )
        par = param[:par_len]
        # scipy.ndimage.map_coordinates, only real difference compared
        # to RegularGridInerpolator is that the coordinates are given
        # in pixels, so must be interpolated from the parameters first
        # using a homegrown 1pt interpolation linterp was no faster than
        # np.interp
        coords = []
        for i,p in enumerate(self.parameters):
            coords.append( np.interp( par[i],self.param_values[p],
                                      np.arange(len(self.param_values[p])) ) )
        pargrid = np.tile(np.array(coords),nwav).\
                        reshape( (nwav,par_len) )
        pargrid = np.insert(pargrid,0,wave_arr,axis=1)
        # interpolation, sped up by doing spline_filter first and
        # memoizing the result, order must be the same in both calls
        ff = utils.spline_filter_mem(self.log_fnujy_sr_hashed,order=2)
        fluxes = map_coordinates(ff,pargrid.T,order=2,prefilter=False)
        # convert back to real fluxes
        fluxes = 10**fluxes
        # hack to avoid negative fluxes arising from ringing
        fluxes[fluxes<cfg.tiny] = cfg.tiny
        # per-filter normalisation for photometry (leave colours)
        if isinstance(self,PhotModel):
            filt = filter.iscolour(tuple(self.filters.tolist()))
            norm = np.zeros(len(self.filters)) + area_sr
            if np.any(filt):
                norm[filt] = 1.0
        else:
            norm = area_sr
        return norm * fluxes
    def copy(self):
        """Return a copy"""
        return copy.deepcopy(self)
    def param_shape(self):
        """Get the shape of the parameter values"""
        dim = ()
        for param in self.parameters:
            dim += ( len(self.param_values[param]), )
        return dim
    # properties below delegate validation to utils.validate_* helpers
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self,value):
        self._name = utils.validate_string(value)
    @property
    def parameters(self):
        return self._parameters
    @parameters.setter
    def parameters(self,value):
        self._parameters = utils.validate_1d(value,None,dtype=str)
    @property
    def param_values(self):
        return self._param_values
    @param_values.setter
    def param_values(self,value):
        self._param_values = utils.validate_dict(value)
    @property
    def filters(self):
        return self._filters
    @filters.setter
    def filters(self,value):
        self._filters = utils.validate_1d(value,None,dtype=str)
    @property
    def wavelength(self):
        return self._wavelength
    @wavelength.setter
    def wavelength(self,value):
        self._wavelength = utils.validate_1d(value,None)
    @property
    def fnujy_sr(self):
        return self._fnujy_sr
    # setter checks the cube's shape against filters/wavelengths and
    # the parameter grids, so must be assigned AFTER those attributes
    @fnujy_sr.setter
    def fnujy_sr(self,value):
        if self.parameters is None:
            expected_dim = None
        else:
            expected_dim = len(self.parameters) + 1 # extra for area_sr
        value = utils.validate_nd(value,expected_dim)
        if self.param_values is not None:
            if isinstance(self,PhotModel):
                if value.shape[0] != len(self.filters):
                    raise utils.SdfError("expected {} elements in first dim,got {}".
                                         format(len(self.filters),value.shape[0]))
            elif isinstance(self,SpecModel):
                if value.shape[0] != len(self.wavelength):
                    raise utils.SdfError("expected {} elements in first dim, got {}".
                                         format(len(self.wavelength),value.shape[0]))
            for i,key in enumerate(self.parameters):
                if len(self.param_values[key]) != value.shape[i+1]:
                    raise utils.SdfError("expected dimension {} to have size {}\
                                         but got {}".format(i,
                                         len(self.param_values[key]),
                                         value.shape[i+1]))
        self._fnujy_sr = value
class PhotModel(Model):
    """Class to hold model grids with convolved fluxes
    The main array is a cube with n+1 dimensions, where n is the number
    of parameters for a given model, found in the ConvolvedModel class.
    The dimensions are [nf,p1,p2,...,pn], where px is a parameter range
    and nf is the number of filters.
    Colour_bases provides info where a filter is a colour/index, in the
    form of an array of dicts, each of which holds an array for the
    *relative* indices (i.e. i_filter - i_colour) locating the base
    filters for that colour, and an array of their additive weights
    (when in mags). This info is not stored for now, but populated when
    the PhotModel is read in. """
    def __init__(self,name=None,parameters=None,param_values=None,
                 filters=None,colour_bases=None,fnujy_sr=None):
        self.name = name
        self.parameters = parameters
        self.param_values = param_values
        self.filters = filters
        self.colour_bases = colour_bases
        # set last so the fnujy_sr setter can check dimensionality
        self.fnujy_sr = fnujy_sr
    def write_model(self,name,overwrite=False):
        """Write PhotModel as a FITS file.
        The location to write to is given by config. Directory is
        created if it doesn't exist.
        Parameters
        ----------
        name : str
            The name of the model to write, this dictates location and
            name of the file as [name]_PhotModel.fits.
        overwrite : bool, optional
            Force overwrite of extant file.
        """
        dir = cfg.file['model_root']+'/'+name+'/'
        if not os.path.exists(dir):
            os.mkdir(dir)
        self.write_file(dir+name+'_PhotModel.fits',overwrite=overwrite)
    # NOTE: no 'self'/'cls'; call as PhotModel.read_model(name). The
    # lru_cache returns a shared instance, so copy() before mutating.
    @lru_cache(maxsize=32)
    def read_model(name):
        """Read a named model, location given in config"""
        self = Model.read_file(cfg.model_loc[name]+name+'_PhotModel.fits')
        self.fill_colour_bases()
        return self
    @classmethod
    def cmlist2model(cls,cm):
        """Turn a list of ConvolvedModel objects into a Model

        Raises SdfError when the ConvolvedModels do not all share the
        same name, parameters, and parameter values.
        """
        self = cls()
        # check for consistency as we go
        self.name = cm[0].name
        self.parameters = cm[0].parameters
        self.param_values = cm[0].param_values
        filters = np.array([])
        cubedim = np.append( len(cm), cm[0].param_shape() )
        cube = np.ndarray( cubedim, dtype=float )
        for i in range(len(cm)):
            filters = np.append(filters,cm[i].filter)
            # bug fix: these error messages referenced an undefined
            # 'files' list (NameError whenever a check failed); models
            # are identified by their filter name instead
            if not self.name == cm[i].name:
                raise utils.SdfError("name {} in {} not the same as {} in {}".
                                     format(cm[i].name,cm[i].filter,
                                            self.name,cm[0].filter))
            if not np.all(self.parameters == cm[i].parameters):
                raise utils.SdfError("parameters {} in {} not the same as {} in {}".
                                     format(cm[i].parameters,cm[i].filter,
                                            self.parameters,cm[0].filter))
            for param in cm[i].parameters:
                if not np.all( np.equal(self.param_values[param],
                                        cm[i].param_values[param]) ):
                    raise utils.SdfError("parameter {} values {} in {}\
                                         not the same as {} in {}".
                                         format(param,cm[i].param_values[param],
                                                cm[i].filter,
                                                self.param_values[param],
                                                cm[0].filter))
            cube[i] = cm[i].fnujy_sr
        self.filters = filters
        self.fnujy_sr = cube
        return self
    @classmethod
    def read_convolved_models(cls,name,filters='all'):
        """Load all model files for a given set of filters
        """
        # full models, avoid reading if they exist
        models = glob.glob(cfg.model_loc[name]+name+'_*Model.fits')
        # array of models, one element for each filter
        files = glob.glob(cfg.model_loc[name]+'*fits')
        for model in models:
            if model in files:
                files.remove(model)
        cm = [convolve.ConvolvedModel() for i in range(len(files))]
        for i,f in enumerate(files):
            cm[i] = convolve.ConvolvedModel.read_file(f)
        self = PhotModel.cmlist2model(cm)
        if filters != 'all':
            self.keep_filters(filters)
        return self
    def fill_colour_bases(self):
        """Fill in colour_bases info.
        Fill an attribute called colour_bases which is a dict pointing
        to where base filters for colours/indices are. The indices are
        relative to the colour locations.
        See Also
        --------
        model.PhotModel.keep_filters
        """
        self.colour_bases = [[] for i in self.filters]
        for k,f in enumerate(self.filters):
            if filter.iscolour(f):
                col = filter.Colour.get(f)
                filteri = []
                for i,cf in enumerate(col.filters):
                    fi = np.where( cf == self.filters )[0][0]
                    # store index relative to the colour's position
                    filteri.append( fi - k )
                self.colour_bases[k] = {'filteri':filteri,
                                        'filterw':col.weights}
    def keep_filters(self,filternames,colour_bases=False):
        """Keep only desired filters from a PhotModel.
        Parameters
        ----------
        filternames : list
            A list of the filter names to keep from the model. Duplicate
            filters are kept, as is the order, as each slice in the
            model must line up with the corresponding one from the
            Photometry that was read in.
        colour_bases : bool, optional
            Keep the base filters that are used to compute
            colours/indices. These are added to the end of the model,
            beyond the filters that were asked for.
        See Also
        --------
        model.PhotModel.fill_colour_bases
        """
        # bug fix: 'keep' holds integer indices, so use dtype=int
        # (appending ints to an empty bool array silently promoted it,
        # and an empty bool index array would fail for no filters)
        keep = np.array([],dtype=int)
        extras = np.array([])
        for f in filternames:
            # grab base filters for colours
            if filter.iscolour(f):
                col = filter.Colour.get(f)
                extras = np.append(extras,col.filters)
            if f in self.filters:
                fi = np.where(f == self.filters)[0]
                keep = np.append( keep, fi )
            else:
                raise utils.SdfError("filter {} not found in PhotModel. "
                                     "This probably means the PhotModel "
                                     "needs to be updated (using "
                                     "model_setup.setup_phot()).".format(f))
        # now add the base filters
        if len(extras) > 0 and colour_bases:
            for f in extras:
                if f in self.filters:
                    fi = np.where(f == self.filters)[0]
                    if fi not in keep:
                        keep = np.append( keep, fi )
        self.filters = self.filters[keep]
        self.fnujy_sr = self.fnujy_sr[keep]
        if colour_bases:
            self.fill_colour_bases()
        else:
            self.colour_bases = []
        self.i = np.arange(len(self.filters))
        self.n_i = len(self.i)
        # and update the hashed version
        self.fill_log_fnujy_sr_hashed()
class SpecModel(Model):
    """Class to hold grids of model spectra
    The main array is a cube with n+1 dimensions, where n is the number
    of parameters for a given model. The dimensions are
    [nw,p1,p2,...,pn], where px is a parameter range and nw is the
    number of wavelengths.
    """
    def __init__(self,name=None,parameters=None,param_values=None,
                 wavelength=None,fnujy_sr=None):
        self.name = name
        self.parameters = parameters
        self.param_values = param_values
        self.wavelength = wavelength
        # set last so the fnujy_sr setter can check dimensionality
        self.fnujy_sr = fnujy_sr
    def write_model(self,name,overwrite=False):
        """Write SpecModel as a FITS file.
        The location to write to is given by config. Directory is
        created if it doesn't exist.
        Parameters
        ----------
        name : str
            The name of the model to write, this dictates location and
            name of the file as [name]_SpecModel.fits.
        overwrite : bool, optional
            Force overwrite of extant file.
        """
        dir = cfg.file['model_root']+'/'+name+'/'
        if not os.path.exists(dir):
            os.mkdir(dir)
        self.write_file(dir+name+'_SpecModel.fits',overwrite=overwrite)
    # NOTE: no 'self'/'cls'; call as SpecModel.read_model(name). The
    # lru_cache returns a shared instance, so copy() before mutating.
    @lru_cache(maxsize=32)
    def read_model(name):
        """Read a named model, location given in config."""
        return Model.read_file(cfg.model_loc[name]+name+'_SpecModel.fits')
    @classmethod
    def read_kurucz(cls,file):
        """Read a Kurucz model grid file and return a SpecModel.
        The model grid will almost certainly not be filled out
        completely, so require some cropping before it can be used.
        """
        self = cls()
        # get the spectra, all have the same wavelength grid
        m,teff,logg,mh = spectrum.ModelSpectrum.read_kurucz(file)
        teffarr = np.unique(teff)
        loggarr = np.unique(logg)
        self.name = m[0].name
        self.wavelength = m[0].wavelength
        self.parameters = ['Teff','logg']
        self.param_values = {'Teff':teffarr,
                             'logg':loggarr}
        # put spectra in their place
        self.fnujy_sr = np.zeros((len(self.wavelength),
                                  len(teffarr),
                                  len(loggarr)),dtype=float)
        for i,mod in enumerate(m):
            j = np.where(teff[i] == teffarr)[0][0]
            k = np.where(logg[i] == loggarr)[0][0]
            self.fnujy_sr[:,j,k] = mod.fnujy_sr
        # see if the grid was filled (spectrum.read_kurucz sets any
        # zero values in the spectra to cfg.tiny)
        if np.min(self.fnujy_sr) < cfg.tiny:
            print("WARNING: model grid not filled, spectra with zeros exist")
        return self
    @classmethod
    def bb_disk_r(cls,name='bb_disk_r',
                  wavelengths=cfg.models['default_wave'],
                  temperatures=10**np.arange(0,3,0.1),
                  lam0=None, beta=None,
                  write=False,overwrite=False):
        """Generate a set of blackbody spectra.

        When *name* contains 'star' the temperature parameter is linear
        ('Teff'); otherwise it is log-spaced ('log_Temp').
        """
        # don't do the calculation if there will be a write error
        if write and overwrite == False:
            if os.path.exists(cfg.model_loc[name]+name+'.fits'):
                raise utils.SdfError("{} exists, will not overwrite".
                                     format(cfg.model_loc[name]+name+'.fits'))
        self = cls()
        m = [spectrum.ModelSpectrum.bnu_wave_micron(wavelengths,t,
                                                    lam0=lam0,
                                                    beta=beta)\
             for t in temperatures]
        self.name = m[0].name
        self.wavelength = m[0].wavelength
        if 'star' in name:
            self.parameters = ['Teff']
            self.param_values = {'Teff':temperatures}
        else:
            self.parameters = ['log_Temp']
            self.param_values = {'log_Temp':np.log10(temperatures)}
        # put spectra in their place
        self.fnujy_sr = np.zeros((len(self.wavelength),
                                  len(temperatures)),dtype=float)
        for i,mod in enumerate(m):
            self.fnujy_sr[:,i] = mod.fnujy_sr
        if write:
            self.write_model(name,overwrite=overwrite)
        return self
    @classmethod
    def modbb_disk_r(cls,name='modbb_disk_r',
                     wavelengths=cfg.models['default_wave'],
                     temperatures=10**np.arange(0,3,0.1),
                     lam0=10**np.arange(1,3,0.1),
                     beta=np.arange(0,3,0.1),
                     write=False,overwrite=False):
        """Generate a set of modified blackbody spectra"""
        # don't do the calculation if there will be a write error
        if write and overwrite == False:
            if os.path.exists(cfg.model_loc[name]+name+'.fits'):
                raise utils.SdfError("{} exists, will not overwrite".
                                     format(cfg.model_loc[name]+name+'.fits'))
        self = cls()
        self.fnujy_sr = np.zeros((len(wavelengths),
                                  len(temperatures),
                                  len(lam0),
                                  len(beta)),dtype=float)
        for i,temp in enumerate(temperatures):
            for j,l0 in enumerate(lam0):
                for k,b in enumerate(beta):
                    m =spectrum.ModelSpectrum.bnu_wave_micron(wavelengths,temp,
                                                              lam0=l0,beta=b)
                    self.fnujy_sr[:,i,j,k] = m.fnujy_sr
        # name/wavelength taken from the last spectrum generated above
        self.name = m.name
        self.wavelength = m.wavelength
        self.parameters = ['log_Temp','log_lam0','beta']
        self.param_values = {'log_Temp':np.log10(temperatures)}
        self.param_values['log_lam0'] = np.log10(lam0)
        self.param_values['beta'] = beta
        if write:
            self.write_model(name,overwrite=overwrite)
        return self
    @classmethod
    def modbb_disk_dr(cls,name='modbb_disk_dr',
                      wavelengths=cfg.models['default_wave'],
                      t_in_min=200.0, t_in_max=2000.0, n_t_in=20,
                      t_out_min=10.0, t_out_max=100.0, n_t_out=10,
                      alpha=np.linspace(-2,2,17),
                      beta=np.linspace(0,2,9),
                      write=False,overwrite=False):
        """Generate a set of wide-disk modified blackbody spectra.
        The disk area is normalised so that the full area is one. Thus
        very wide disks (those with cool outer edges) are much fainter
        in terms of model flux than smaller ones.
        Parameters
        ----------
        name : str, optional
            Name of the model.
        wavelengths : array, optional
            Array of wavelengths for the model.
        t_in_min : float, optional
            Minimum inner edge temperature.
        t_in_max : float, optional
            Maximum inner edge temperature.
        n_t_in : int, optional
            Number of inner temperatures.
        t_out_min : float, optional
            Minimum outer edge temperature.
        t_out_max : float, optional
            Maximum inner edge temperature.
        n_t_out : int, optional
            Number of outer temperatures.
        alpha : array, optional
            Array of power law indices for optical depth.
        beta : array, optional
            Array of betas, lambda_0's fixed near blackbody peak.
        write : bool, optional
            Write the model to disk.
        overwrite : bool, optional
            Overwrite any existing model.
        """
        # don't do the calculation if there will be a write error
        if write and overwrite == False:
            if os.path.exists(cfg.model_loc[name]+name+'.fits'):
                raise utils.SdfError("{} exists, will not overwrite".
                                     format(cfg.model_loc[name]+name+'.fits'))
        self = cls()
        # set up temperature arrays
        log_t_in = np.linspace(np.log10(t_in_min),np.log10(t_in_max),n_t_in)
        t_in = 10**log_t_in
        log_t_out = np.linspace(np.log10(t_out_min),np.log10(t_out_max),n_t_out)
        t_out = 10**log_t_out
        self.fnujy_sr = np.zeros((len(wavelengths),
                                  n_t_in,n_t_out,len(alpha),
                                  len(beta)),dtype=float)
        # loop to fill model
        n_r = 100
        for i,t1 in enumerate(t_in):
            for j,t2 in enumerate(t_out):
                for k,a in enumerate(alpha):
                    for l,b in enumerate(beta):
                        tau_tot = 0.0
                        # generate temps and a range of pseudo radii
                        t_edges = 10**np.linspace(np.log10(t1),
                                                  np.log10(t2),
                                                  n_r+1, dtype=float)
                        temps = (t_edges[1:]+t_edges[:-1])/2.
                        # pseudo radius from temperature; presumably a
                        # blackbody T(r) scaling — TODO confirm 278.3
                        radius = lambda x: (278.3/x)**2
                        r = radius(temps)
                        r_edges = radius(t_edges)
                        dr = np.diff(r_edges)
                        tau = r**a
                        for x in range(n_r):
                            m = spectrum.ModelSpectrum.bnu_wave_micron(
                                wavelengths,
                                temps[x],
                                lam0=3*2900.0/temps[x],
                                beta=b
                            )
                            annulus_area = tau[x] * 2.0*np.pi*r[x]*dr[x]
                            spec = m.fnujy_sr * annulus_area
                            self.fnujy_sr[:,i,j,k,l] += spec
                            tau_tot += annulus_area
                        # normalise area for all models
                        self.fnujy_sr[:,i,j,k,l] /= tau_tot
        # name/wavelength taken from the last spectrum generated above
        self.name = m.name
        self.wavelength = m.wavelength
        self.parameters = ['log_T_in','log_T_out','alpha','beta']
        self.param_values = {'log_T_in': log_t_in}
        self.param_values['log_T_out'] = log_t_out
        self.param_values['alpha'] = alpha
        self.param_values['beta'] = beta
        if write:
            self.write_model(name,overwrite=overwrite)
        return self
    @classmethod
    def sd_spectra(cls, name='sd_disk_r',
                   wavelengths=cfg.models['default_wave'],
                   temperatures=10**np.arange(0,3.01,0.1),
                   smin=10**np.arange(-1,2.01,0.1),
                   q=np.arange(1.67,2.001,0.02),
                   smax=100000, nsz=100,
                   write=False,overwrite=False):
        """Generate a set of size distribution models.
        Simple analytic grain model after Backman & Paresce, assumes
        that grains have Qabs that is 1 for s<pi.lambda, and Qabs
        decreasing as lambda^-n beyond. What n is depends on the dust
        properties, but it appears to be >1 because some disks have
        (sub)mm slopes steeper than Fnu oc nu^3. Models look like they
        have n~2, e.g. Draine astrosilicate.
        The sub-mm slopes are not as steep as can be obtained from the
        real grain models. This is something to do with the details of
        the absorption/emission efficiencies.
        This models assumes that the peak wavelength of the stellar
        spectrum peaks at lambda shorter than the grain size to estimate
        the temperatures, which is a bit suspect but necessary for an
        analytic solution. The results are a weak function [T^(n/(4+n)]
        of the stellar temperature anyway.
        """
        # constants
        xt = np.pi   # turnover in Qabs, pi is like "real" temperatures
        n = 2.0      # slope of Qabs beyond xt, 2 is like "real" dust
        cw = 5100.   # peak of blackbody emission in micron/K
        ts = 6000.   # assumed stellar temperature
        self = cls()
        self.fnujy_sr = np.zeros((len(wavelengths),
                                  len(temperatures),
                                  len(smin),
                                  len(q)),dtype=float)
        for i,tbb in enumerate(temperatures):
            for j, smini in enumerate(smin):
                for k, qi in enumerate(q):
                    # sizes
                    s = 10**np.linspace(np.log10(smini),
                                        np.log10(smax), nsz)
                    logs = np.log10(s)
                    # calculate grain temperatures
                    dbb = cw / tbb / xt
                    dsm = cw / ts / xt
                    sm = s < dsm
                    bb = s > dbb
                    s_temp = tbb * (dbb/s)**(n/(4+n))
                    s_temp[sm] = tbb**(4/(4+n)) * ts**(n/(4+n))
                    s_temp[bb] = tbb
                    # compute bnu for each size
                    bnu = np.ones((len(s), len(wavelengths)))
                    for l, st in enumerate(s_temp):
                        bnu[l,:] = utils.bnu_wav_micron(wavelengths, st)
                    # qabs
                    qabs = np.ones((len(s), len(wavelengths)))
                    for l, si in enumerate(s):
                        x = wavelengths / si
                        gtx = x > xt
                        qabs[l,gtx] = (xt/x[gtx])**n
                    # add up size distribution, this is taken straight
                    # from Wyatt's IDL sigmadbar
                    qfact = 5 - 3*qi
                    sigmadbar = qfact * np.log(10) * \
                                (10**(logs*qfact)) / \
                                (smax**qfact - smini**qfact)
                    for l in range(len(wavelengths)):
                        self.fnujy_sr[l,i,j,k] = \
                            utils.sdf_int(qabs[:,l]*bnu[:,l]*sigmadbar, logs)
        # return wavelengths, s, s_temp, bnu, qabs, sigmadbar, self.fnujy_sr[:]
        self.name = 'sd'
        self.wavelength = wavelengths
        self.parameters = ['log_Temp','log_Dmin','q']
        self.param_values = {'log_Temp': np.log10(temperatures)}
        self.param_values['log_Dmin'] = np.log10(smin)
        self.param_values['q'] = q
        if write:
            self.write_model(name,overwrite=overwrite)
        return self
    def interp_to_wavelengths(self,wavelength,log=True):
        """Interpolate the model to the given wavelengths."""
        # TODO: this only needs to be run at the beginning of a fit but
        # is very slow, especially when there are spectra, speed it up!
        # TODO: this is straight linear/log interpolation, but could
        # smooth the spectra first since the given wavelength grid will
        # almost certainly be near the spectral resolution of whatever
        # instrument it came from. Probably use resample.
        # check we need to do something
        if np.all(self.wavelength == wavelength):
            return
        if log:
            # clamp non-positive fluxes so log10 is defined
            neg = self.fnujy_sr <= 0.0
            self.fnujy_sr[neg] = cfg.tiny
            cube = np.log10(self.fnujy_sr)
            wave = np.log10(self.wavelength)
            wave_interp = np.log10(wavelength)
        else:
            cube = self.fnujy_sr
            wave = self.wavelength
            wave_interp = wavelength
        # get a function that will return interpolated values, ensure
        # error if extrapolation in wavelength requested (i.e. model
        # doesn't cover as wide as was requested)
        f = interp1d(wave,cube,axis=0,kind='linear',
                     bounds_error=True)
        cube_interp = f(wave_interp)
        self.wavelength = wavelength
        if log:
            self.fnujy_sr = np.power(10,cube_interp)
        else:
            self.fnujy_sr = cube_interp
        self.i = np.arange(len(self.wavelength))
        self.n_i = len(self.i)
        # and update the hashed version
        self.fill_log_fnujy_sr_hashed()
def model_fluxes(m,param,obs_nel,phot_only=False):
    """Get model fluxes and put in arrays.
    all_fnu is everything added up, with colours/indices added properly,
    comp_fnu[i] contains fluxes from the i-th model component and
    comp_fnu_col[i] contains these with colours computed.

    Parameters
    ----------
    m : tuple
        Model components; each element is a Phot/SpecModel or a tuple
        of them (as built by get_models).
    param : array-like
        Concatenated parameters for all components; each component
        consumes len(parameters)+1 entries (the +1 is normalisation).
    obs_nel : array-like
        Number of observed fluxes/colours per Phot/SpecModel, passed
        through to fill_colours.
    phot_only : bool, optional
        Skip models that are not PhotModels.
    """
    comp_fnu = []
    all_fnu = []
    i0 = 0
    # loop over model components
    for comp in m:
        # loop over phot/spectra for this component if they exist
        if not isinstance(comp,tuple):
            comp = (comp,)
        flux = np.array([])
        # params same for all in each component
        nparam = len(comp[0].parameters)+1
        for mod in comp:
            if phot_only:
                if not isinstance(mod,PhotModel):
                    continue
            fnu = mod.fnujy(param[i0:i0+nparam])
            flux = np.append( flux, fnu )
        # since we don't know how long all_fnu will be, make sure
        # comp_fnu has first dimension equal number of components
        if len(all_fnu) == 0:
            all_fnu = flux
            comp_fnu = np.array([flux])
        else:
            # assumes every component yields a flux array of the same
            # length — TODO confirm (+= and vstack fail otherwise)
            all_fnu += flux
            comp_fnu = np.vstack( (comp_fnu,flux) )
        i0 += nparam
    # fill colours, for total and components
    mod_fnu = fill_colours(m[0],all_fnu,obs_nel)
    comp_fnu_col = np.zeros((len(comp_fnu),len(mod_fnu)))
    for i,fnu in enumerate(comp_fnu):
        comp_fnu_col[i] = fill_colours(m[0],fnu,obs_nel)
    return mod_fnu,comp_fnu_col
def crop(m,param,range):
    """Crop a model to ranges specified for a parameter.

    Parameters
    ----------
    m : model object
        Model to crop; a cropped copy is returned, m is unchanged.
    param : str
        Name of the parameter to crop on.
    range : array-like of length 2
        Lower/upper parameter values to keep (note: shadows the
        builtin ``range``; name kept for API compatibility).
    """
    out = m.copy()
    if param not in out.parameters:
        # bug fix: was 'foramt', which raised NameError instead of the
        # intended SdfError whenever this branch was taken
        raise utils.SdfError("parameter {} not in model (has {})".
                             format(param,out.parameters))
    # get axis to cut, and locations; +1 since the first cube axis is
    # filters/wavelengths
    ax = np.where(out.parameters == param)[0][0] + 1
    locs = np.searchsorted(out.param_values[param],range)
    print("cutting parameters along axis {} to indices {}".
          format(ax,locs))
    out.param_values[param] = out.param_values[param][locs[0]:locs[1]]
    arrin = out.fnujy_sr
    # bring the target axis to the front, slice it, then put it back
    arr = np.rollaxis(out.fnujy_sr,ax)
    arr = arr[locs[0]:locs[1]]
    out.fnujy_sr = np.rollaxis(arr,0,ax+1)
    print("cropped model from {} to {}".
          format(arrin.shape,out.fnujy_sr.shape))
    return out
def reduce_zerod(m,parameters):
    """Reduce a model to zero dimensions (i.e. a spectrum).
    Parameters
    ----------
    model : model object
        The model to reduce.
    parameters : list
        Parameter values, excluding the last (normalisation) parameter.
    """
    reduced = m.copy()
    # a zero-dimensional model has no free parameters left
    reduced.parameters = []
    reduced.param_values = {}
    # evaluate the grid at the given parameters with unit solid angle,
    # then refresh the hashed flux array
    reduced.fnujy_sr = m.fnujy(np.append(parameters, -np.log10(cfg.ssr)))
    reduced.fill_log_fnujy_sr_hashed()
    return reduced
def reduce_squeeze(m):
    """Reduce model by removing length=one dimensions.

    Returns a copy with every parameter whose value grid has a single
    entry dropped, and the flux cube squeezed accordingly.
    """
    out = m.copy()
    print('input model has shape:{}'.format(m.param_shape()))
    keep = np.array(m.param_shape()) != 1
    # discard the value grids of the singleton parameters
    for par in m.parameters[~keep]:
        del out.param_values[par]
    out.parameters = m.parameters[keep]
    out.fnujy_sr = np.squeeze(m.fnujy_sr)
    print('output model has shape:{}'.format(out.param_shape()))
    return out
def append_parameter(m,name,value):
    """Append a single parameter to a model.
    Purpose is to prepare a model for addition models via concat.

    The new parameter has a single value, so the flux cube gains a
    trailing axis of length one.
    """
    out = m.copy()
    out.parameters = np.append(out.parameters, name)
    out.param_values[name] = np.array([value])
    # add a trailing singleton dimension for the new parameter
    out.fnujy_sr = out.fnujy_sr[..., np.newaxis]
    return out
def concat(m0,m):
    """Add a model to the one we have.
    So far can only add models when both have the same sets of
    parameters, so for example joining two models with different
    metallicities.

    Returns a copy of m0 with m's values inserted along the single
    parameter axis where the two models' value grids differ.
    """
    out = m0.copy()
    # check types, parameters, wavelengths, filters are the same, allow
    # for small differences in wavelengths, which can apparently occur
    # when the arrays are calculated on different machines
    for i,par in enumerate(out.parameters):
        if par != m.parameters[i]:
            raise utils.SdfError("parameters {} and {} different".
                                 format(out.parameters,m.parameters))
    if type(out) != type(m):
        raise utils.SdfError("can't join models of type {} and {}".
                             format(type(out),type(m)))
    if isinstance(out,PhotModel):
        for i,filt in enumerate(out.filters):
            if filt != m.filters[i]:
                raise utils.SdfError("filters {} and {} different".
                                     format(out.filters,m.filters))
    if isinstance(out,SpecModel):
        if not np.allclose(out.wavelength,m.wavelength,rtol=1e-12,atol=1e-12):
            raise utils.SdfError("wavelengths {} and {} different".
                                 format(out.wavelength,m.wavelength))
    # parameters to add and their locations in out
    padd = []
    pax = []
    ploc = []
    for i,p in enumerate(out.parameters):
        if not np.all(np.equal(out.param_values[p],m.param_values[p])):
            pax.append(i+1) # +1 since first dim is wav/filters
            padd.append(p)
            # NOTE(review): split uses axis=i while pax records i+1 —
            # looks inconsistent with the insert below; confirm with a
            # multi-parameter model before relying on this
            arrs = np.split(m.fnujy_sr,len(m.param_values[p]),axis=i)
            for val in m.param_values[p]:
                ploc.append( np.searchsorted(out.param_values[p],val) )
    if len(padd) != 1:
        raise utils.SdfError("model parameters can't be joined (padd={})".
                             format(padd))
    else:
        padd = padd[0]
    print("Adding parameter {} at location(s) {} along axis {}".
          format(padd,ploc,pax))
    for i,loc in enumerate(ploc):
        if m.param_values[padd][i] in out.param_values[padd]:
            raise utils.SdfError("model already has {}={}".
                                 format(padd,m.param_values[padd][i]))
        print("  adding {}={} (dims {} to {}) at {}".
              format(padd,m.param_values[padd][i],
                     arrs[i].squeeze().shape,out.fnujy_sr.shape,loc))
        out.param_values[padd] = np.insert(out.param_values[padd],
                                           loc,m.param_values[padd][i])
        # NOTE(review): pax has exactly one element (len(padd)==1), so
        # pax[i] raises IndexError when inserting more than one value —
        # presumably pax[0] was intended; confirm
        out.fnujy_sr = np.insert(out.fnujy_sr,loc,
                                 arrs[i].squeeze(),axis=pax[i])
    print("  new {} array is {}".format(padd,out.param_values[padd]))
    return out
def fill_colours(comp,mod_fnu,obs_nel):
    """Replace locations in mod_fnu with colours
    Colours cannot simply be added without knowing the absolute
    measurements. The model components have already been added so we
    only need to figure out where the filters associated with the
    colours are, and calculate the colours from these.
    By using obs_nel, the number of observed fluxes/colours per
    Phot/SpecModel component, the extra columns containing the base
    filters for colours are not included in the returned result.
    """
    final_fnu = np.array([])
    if not isinstance(comp,tuple):
        comp = (comp,)
    i0comp = 0 # zeroth index of the current Phot/SpecModel
    for k,mod in enumerate(comp):
        if isinstance(mod,PhotModel):
            # loop over each filter/index in the model, skip
            # if there are no colour_bases (i.e. a filter)
            for i,cb in enumerate(mod.colour_bases):
                if len(cb) > 0:
                    mags = 0.0
                    # loop over the filters in this colour/index
                    # and add the magnitude with the correct weight
                    # for this colour/index
                    for j,filteri in enumerate(cb['filteri']):
                        fname = mod.filters[i+filteri]
                        irel = i0comp+i+cb['filteri'][j]
                        filt = filter.Filter.get(fname)
                        mag = filt.flux2mag(mod_fnu[irel])
                        mags += mag * cb['filterw'][j]
                    # NOTE(review): writes at index i, not i0comp+i —
                    # the read above offsets by i0comp, so this looks
                    # correct only for the first component; confirm
                    mod_fnu[i] = mags
            final_fnu = np.append(final_fnu,
                                  mod_fnu[i0comp:i0comp+obs_nel[k]])
            i0comp += len(mod.filters)
        else:
            final_fnu = np.append(final_fnu,
                                  mod_fnu[i0comp:i0comp+obs_nel[k]])
            i0comp += len(mod.wavelength)
    return final_fnu
def get_models(obs,names):
    """Get tuples of models for given observations.

    Returns two tuples of models. The first is for fitting and contains
    extra filters beyond the set in the observations if there are
    colours in the photometry. The second excludes the extra filters,
    and has a max of one spectrum (for plotting purposes).
    """
    allmod, fullmod = (), ()
    for name in names:
        # base models for this component name, copied per observation
        phot_base = PhotModel.read_model(name)
        spec_base = SpecModel.read_model(name)
        fit_comps, plot_comps = (), ()
        for o in obs:
            if isinstance(o,photometry.Photometry):
                # fitting version keeps the colour base filters
                with_colours = phot_base.copy()
                with_colours.keep_filters(o.filters,colour_bases=True)
                fit_comps += (with_colours,)
                # plotting version does not
                without_colours = phot_base.copy()
                without_colours.keep_filters(o.filters,colour_bases=False)
                plot_comps += (without_colours,)
            if isinstance(o,spectrum.ObsSpectrum):
                interp = spec_base.copy()
                interp.interp_to_wavelengths(o.wavelength)
                fit_comps += (interp,)
                # always want a full spectrum in full model, but only one
                plot_comps += (spec_base,)
        allmod += (fit_comps,)
        fullmod += (plot_comps,)
    return allmod,fullmod
def models_info(m):
    """Return some info for a tuple of model(s).

    Also do some basic sanity checking.
    Here is where the ranges for model and spectra normalisation is set.

    Parameters
    ----------
    m : tuple
        Tuple (one entry per observation set) of models or tuples of
        models; sub-models in a tuple share one set of parameters.
    """
    info = {}
    info['name'] = ''         # joined name of all components
    info['ndim'] = 0          # total number of fitting dimensions
    info['ncomp'] = []        # number of sub-models per component
    info['nspec'] = []        # number of SpecModels per component
    info['type'] = []         # type of each sub-model
    info['p_rng'] = []        # (min,max) allowed range per parameter
    info['parameters'] = []   # parameter names, in fitting order
    info['nmodels'] = len(m)
    for comp in m:
        nspec = 0
        if not isinstance(comp,tuple):
            comp = (comp,)
        info['ncomp'].append(len(comp))
        # sub-models of one component share parameters, so use the first
        if info['name'] == '':
            info['name'] = comp[0].name
        else:
            info['name'] += cfg.fitting['model_join']+comp[0].name
        # +1 for the normalisation parameter added below
        info['ndim'] += len(comp[0].parameters)+1
        for par in comp[0].parameters:
            # grids are assumed ordered, so first/last give the range
            info['p_rng'].append( (comp[0].param_values[par][0],
                                   comp[0].param_values[par][-1]) )
            info['parameters'].append( par )
        # this is the range of allowed solid angles
        info['p_rng'].append( cfg.fitting['model_om_range'] )
        info['parameters'].append('norm')
        for mod in comp:
            if isinstance(mod,SpecModel):
                nspec += 1
            info['type'].append(type(mod))
        info['nspec'].append(nspec)
    # one extra dimension per observed spectrum (its normalisation);
    # nspec is expected equal for every component (checked below)
    info['ndim'] += info['nspec'][0]
    # spectra normalisations last (nspec same for each comp)
    for i in range(nspec):
        info['p_rng'].append( cfg.fitting['spectra_norm_range'] )
        info['parameters'].append('spec_norm')
    # check structure looks OK
    if len(np.unique(info['ncomp'])) > 1:
        raise utils.SdfError("model structure {} should have same number of\
 subcomponents in each component, not {}".
                             format(m,info['ncomp']))
    if len(np.unique(info['nspec'])) > 1:
        raise utils.SdfError("model structure {} should have same number of spectra\
 in each component, not {}".format(m,info['nspec']))
    return info
| {
"repo_name": "drgmk/sdf",
"path": "sdf/model.py",
"copies": "1",
"size": "47700",
"license": "mit",
"hash": 5627041390633751000,
"line_mean": 35.3290175171,
"line_max": 90,
"alpha_frac": 0.5379035639,
"autogenerated": false,
"ratio": 3.9581777445855115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49960813084855116,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import os
import glob
import binarytree as bt
import numpy as np
import emcee
# in case we don't have this module
# optional dependency: record availability in a flag instead of failing
# at import time; consumers can check classifier_module before use
try:
    import classifier.photometry
    import classifier.spectra
    classifier_module = True
except ImportError:
    classifier_module = False
from . import model
from . import photometry
from . import spectrum
from . import filter
from . import utils
from . import result
from . import db
from . import config as cfg
# these are for getting info to pymultinest
# module-level state set in multinest() and read by the pymultinest
# callbacks (multinest_prior/multinest_lnlike), which take no user args
global_obs = ()
global_mod = ()
global_p_rng = ()
def fit_results(file,update_mn=False,update_an=False,
                update_json=False,update_thumb=False,
                sort=True,custom_sort=True,nospec=False):
    """Return a list of fitting results.

    Parameters
    ----------
    file : str
        The raw photometry file to use as input.
    update_mn : bool, optional
        Force update of multinest fitting.
    update_an : bool, optional
        Force update of post-multinest fitting analysis.
    update_json : bool, optional
        Force update of the per-result json file.
    update_thumb : bool, optional
        Force update of the per-result sed thumbnail.
    sort : bool, optional
        Sort results by decreasing evidence.
    custom_sort: bool, optional
        Additonally sort results using per-target config.
    nospec : bool, optional
        Exclude observed specta from fitting (for speed).
    """
    print(" Fitting")
    results = []

    # fit specific models if defined by conf, overrides default
    if len(cfg.fitting['models']) > 0:
        for m in cfg.fitting['models']:
            print("  ",m)
            r = result.Result.get(
                file,m,update_mn=update_mn,
                update_an=update_an,update_json=update_json,
                update_thumb=update_thumb,nospec=nospec
            )
            # check for files with no photometry
            if not hasattr(r,'obs'):
                print("  no photometry = no results")
                return None
            results.append(r)
    else:
        # binary tree-based fitting
        t = model_director(file)
        try:
            print_model_tree(t)
        except Exception:
            # narrowed from a bare except; the cute unicode symbols may
            # not be encodable on every console, fall back to ascii
            print_model_tree(t, cute=False)

        while t.left is not None and t.right is not None:
            print("  ",t.left.value,"vs.",t.right.value)
            r1 = result.Result.get(
                file,t.left.value,update_mn=update_mn,
                update_an=update_an,update_json=update_json,
                update_thumb=update_thumb,nospec=nospec
            )
            r2 = result.Result.get(
                file,t.right.value,update_mn=update_mn,
                update_an=update_an,update_json=update_json,
                update_thumb=update_thumb,nospec=nospec
            )
            # check for files with no photometry
            if not hasattr(r1,'obs'):
                print("  no photometry = no results")
                return None
            # append results, only append left result at start since where
            # on lower branches the left model has already been done
            if len(results) == 0:
                results.append(r1)
            results.append(r2)
            # move on down the tree: prefer the more complex model only
            # when its evidence clears the threshold
            if r2.evidence > r1.evidence + cfg.fitting['ev_threshold']:
                t = t.right
            else:
                t = t.left

    # sort list of results by evidence (required for subsequent custom sort)
    print(' Sorting')
    if sort or custom_sort:
        print('  sorting results by evidence')
        results = [results[i] for i in result.sort_results(results)]
    else:
        print('  no results sorting')

    # sort list of results by custom method
    if custom_sort:
        print('  applying db.custom_sort results sorting')
        srt = db.custom_sort(file, results)
        if srt is None:
            print("  couldn't get config")
        else:
            results = [results[i] for i in srt]

    # save a thumb of the best fit next to the input file, update every
    # time since we may have changed best fit (but not fitting itself)
    results[0].sed_thumbnail(file='{}/{}_thumb.png'.format(results[0].path, results[0].id), update=True)
    return results
def model_director(file,reddening=False,use_classifier=False):
    """Workflow for model fitting.

    Build a binary tree of model comparisons based on the target's
    spectral type (taken from the sdb file keywords).

    Parameters
    ----------
    file : str
        Name of the photometry file we are fitting.
    reddening : bool, optional
        Use models with reddening.
    use_classifier : bool, optional
        Use classifier.
    """
    # NOTE(review): use_classifier (and the module-level
    # classifier_module flag) are not referenced in this body
    # default model tries star + up to two bb components
    if reddening:
        star = 'phoenix_m_av'
    else:
        star = 'phoenix_m'
    t_star = model_tree(top=(star,), extra='modbb_disk_r',n_extra=2)
    # cool star model
    if reddening:
        cool = 'phoenix_cool_av'
    else:
        cool = 'phoenix_cool'
    t_cool = model_tree(top=(cool,), extra='modbb_disk_r')
    # look for spectral type, LTY types get cool models, other types
    # default to star models, and M5-9 (or just M) get both
    tree = t_star
    cool = 0  # NOTE: reuses the name 'cool' as a 0/1/2 category flag
    kw = utils.get_sdb_keywords(file)
    if 'sp_type' in kw.keys():
        if kw['sp_type'] is None:
            pass
        elif kw['sp_type'][0] in 'LTY':
            tree = t_cool
            cool = 2
        elif kw['sp_type'][0] == 'M':
            # NOTE(review): the checks below have no effect — both arms
            # end in `pass`, and the 'dM' test is unreachable here since
            # a string starting with 'M' cannot start with 'dM'.
            # Presumably the intent was to restrict the combined tree to
            # M5-9 / dM5-9 types; confirm against upstream before changing.
            if len(kw['sp_type']) > 1:
                if kw['sp_type'][1] not in '56789':
                    pass
            elif kw['sp_type'][0:2] == 'dM':
                if len(kw['sp_type']) > 2:
                    if kw['sp_type'][2] not in '56789':
                        pass
            # every M type reaches here: compare cool vs. star trees
            cool = 1
            tree = bt.Node(('top',))
            tree.left = t_cool
            tree.right = t_star
            tree.value = ('top',)
    return tree
def model_tree(top=('phoenix_m',),extra='modbb_disk_r',n_extra=1):
    """Return a binary tree of alternative model combinations.

    Parameters
    ----------
    top : tuple of str, optional
        Name(s) of the first model.
    extra : str, optional
        Name of the additional model component.
    n_extra : int, optional
        Include two extra component branch when set to 2.
    """
    # the top node doesn't matter unless this tree becomes a branch
    # of a bigger tree
    root = bt.Node( top )
    root.left = bt.Node( top )
    one_extra = top + (extra,)
    root.right = bt.Node( one_extra )
    if n_extra == 2:
        root.right.left = bt.Node( one_extra )
        root.right.right = bt.Node( one_extra + (extra,) )
    return root
def print_model_tree(t, cute=True):
    """Print a model tree, shortening the model names.

    Unicode symbols here https://unicode-table.com/en/

    Parameters
    ----------
    t : binary tree
        Tree to print.
    cute : bool, optional
        Print cute ascii symbols.
    """
    # map model names to short display symbols
    if cute:
        symbols = {'top':u'\u2602',
                   'phoenix_m':u'\u2606',
                   'phoenix_m_av':u'\u2605',
                   'phoenix_cool':u'\u2733',
                   'phoenix_cool_av':u'\u2739',
                   'modbb_disk_r':u'\u29b8',
                   'bb_disk_r':u'\u25cb'
                   }
    else:
        symbols = {'top':'t',
                   'phoenix_m':'p',
                   'phoenix_m_av':'pr',
                   'phoenix_cool':'c',
                   'phoenix_cool_av':'cr',
                   'modbb_disk_r':'mb',
                   'bb_disk_r':'b'
                   }
    nodes = bt.convert(t)
    for i, node in enumerate(nodes):
        if node is None:
            continue
        # replace each known model name with its symbol, keep unknowns
        nodes[i] = ''.join(symbols.get(name, name) for name in node)
    bt.convert(nodes).show()
@lru_cache(maxsize=2)
def concat_obs(o):
    """Concatenate observations.

    Concatenate the observations (filters and spectra), not
    shifting the normalisation of the spectra, but noting how
    many observations there are in each component (as the models
    will probably contain more due to colours/indices).

    Parameters
    ----------
    o : tuple
        Tuple of Photometry / ObsSpectrum objects (hashable, so the
        lru_cache above can memoise repeated calls during fitting).
    """
    obs_wav = np.array([],dtype=float)
    obs_filt = np.array([],dtype=object)
    obs_fnu = np.array([],dtype=float)
    obs_e_fnu = np.array([],dtype=float)
    obs_uplim = np.array([],dtype=bool)
    obs_ignore = np.array([],dtype=bool)
    obs_bibcode = np.array([],dtype=str)
    obs_nel = np.array([],dtype=int)
    ispec = -2 # start one extra from end, we will put -1 there for phot
    obs_ispec = np.array([],dtype=int)
    for obs in o:
        if isinstance(obs,photometry.Photometry):
            obs_wav = np.append(obs_wav,obs.mean_wavelength())
            # bugfix: was np.append(obs_fnu, obs.filters), which seeded
            # the filter list with any already-accumulated fluxes
            obs_filt = np.append(obs_filt,obs.filters)
            obs_fnu = np.append(obs_fnu,obs.fnujy)
            obs_e_fnu = np.append(obs_e_fnu,obs.e_fnujy)
            # photometry points all get spectrum index -1
            obs_ispec = np.append(obs_ispec,np.repeat(-1,len(obs.filters)))
            obs_uplim = np.append(obs_uplim,obs.upperlim)
            obs_ignore = np.append(obs_ignore,obs.ignore)
            obs_bibcode = np.append(obs_bibcode,obs.bibcode)
        elif isinstance(obs,spectrum.ObsSpectrum):
            n = len(obs.wavelength)
            obs_wav = np.append(obs_wav,obs.wavelength)
            obs_filt = np.append(obs_filt,np.repeat(None,n))
            obs_fnu = np.append(obs_fnu,obs.fnujy)
            obs_e_fnu = np.append(obs_e_fnu,obs.e_fnujy)
            # each spectrum gets its own negative index, -2 downwards
            obs_ispec = np.append(obs_ispec,np.repeat(ispec,n))
            ispec -= 1
            obs_uplim = np.append(obs_uplim,np.zeros(n,dtype=bool))
            obs_ignore = np.append(obs_ignore,np.zeros(n,dtype=bool))
            obs_bibcode = np.append(obs_bibcode,np.repeat(obs.bibcode,n))
        obs_nel = np.append(obs_nel,len(obs.fnujy))
    return (obs_fnu,obs_e_fnu,obs_uplim,obs_ignore,obs_ispec,
            obs_nel,obs_wav,obs_filt,obs_bibcode)
def residual(param,*args):
    """Return residuals for a model compared to observation.

    The structure of the observation and models needs to
    match, and the filters and/or wavelengths present within
    each of the correspondong observation and model obects
    must match.

    For pure photometry the args would be:
    (Photometry,),(PhotModel,)

    Where there are spectra, the parameters for a given set of
    (PhotModel,SpecModel) are the same, and the args would be:
    (Photometry,ObsSpectrum),((PhotModel,SpecModel),)

    For combining multiple models the args would be:
    (Ph,ObsSp),((PhMod,SpMod),(PhMod,SpMod))

    The parameters are ordered similarly, with each tuple of
    models sharing the same parameters, and with the normalisations
    for observed spectra last, in the same order as the spectra.

    To deal with colours/indices, which need the model components
    to be added first, and then derived, PhotModels have extra
    elements with the colour/index base filters, which are used
    to derive the colours/indices as we go.
    """
    o,m = args # observations and models

    # concatenate observations
    obs_fnu,obs_e_fnu,obs_uplim,obs_ignore,obs_ispec,\
        obs_nel,obs_wav,obs_filt,_ = concat_obs(o)

    # multiply spectra by appropriate normalisation, ispec starts at
    # -2 so we can add 1.0 for photometry at the end of the params
    # (np.take with negative indices reads from the end of the array)
    spec_norm = np.take(np.append(param,1.0),obs_ispec)
    obs_fnu = obs_fnu * spec_norm
    obs_e_fnu = obs_e_fnu * spec_norm

    # get model fluxes, including filling of colours/indices
    mod_fnu,_ = model.model_fluxes(m,param,obs_nel)

    # residuals in significance units, setting zero where (photometry)
    # is to be ignored, and for upper limits (but amended below)
    resid = np.zeros(len(obs_fnu))
    ok = np.invert( np.any([obs_uplim,obs_ignore],axis=0) )
    resid[ok] = (obs_fnu[ok] - mod_fnu[ok]) / obs_e_fnu[ok]

    # set residual if any 3sigma upper limits exceeded at the 1sigma
    # level, ignored and otherwise zero as set above
    # see Johnson+2013, MNRAS 436, 2535 for some discussion
    for i,lim in enumerate(obs_uplim):
        if lim and not obs_ignore[i]:
            if mod_fnu[i] > obs_fnu[i]/3.:
                # negative: model exceeds the limit, penalise the fit
                resid[i] = -1. * mod_fnu[i] / (obs_fnu[i]/3.)

    return resid,obs_wav,obs_filt
def residual_phot(param,*args):
    """Return residuals for a model compared to observation.

    This is a version for only photometry.

    Parameters
    ----------
    param : np.array
        Model parameters, concatenated over components.
    *args : (Photometry, model or tuple/list of models)
        Observed photometry and the model(s) to compare.
    """
    p,m = args # photometry and model

    # get model fluxes, summing components when given several models
    if isinstance(m,(tuple,list)):
        model_fnujy = np.zeros(p.nused)
        i0 = 0
        for mod in m:
            nparam = len(mod.parameters)+1
            flux = mod.fnujy(param[i0:i0+nparam])
            model_fnujy = model_fnujy + flux
            i0 += nparam
    else:
        model_fnujy = m.fnujy(param)

    # residuals in significance units
    resid = np.zeros(p.nused)
    ok = np.invert(p.upperlim)
    resid[ok] = (p.fnujy[ok] - model_fnujy[ok]) / p.e_fnujy[ok]

    # set residual if any upper limits exceeded, otherwise zero
    # (e_fnujy may be zero so the ratio form is used instead)
    # bugfix: the original tested `if model_fnujy[lim] > p.fnujy[lim]:`,
    # which raises ValueError when more than one upper limit exists;
    # handle each limit elementwise instead. Also removed leftover
    # debug print of the residual array.
    if np.any(p.upperlim):
        lim = p.upperlim
        exceeded = model_fnujy[lim] > p.fnujy[lim]
        resid[lim] = np.where(
            exceeded,
            (p.fnujy[lim]-model_fnujy[lim])/(p.fnujy[lim]/3.),
            0.0
        )
    return resid
def chisq(param,*args):
    """Return the sum of squared residuals for these parameters."""
    residuals, _, _ = residual(param,*args)
    return np.sum(residuals ** 2)
def lnlike(param,*args):
    """Return the log likelihood, or -inf when chi^2 is not finite."""
    chi2 = chisq(param,*args)
    return -0.5 * chi2 if np.isfinite(chi2) else -np.inf
def multinest_prior(cube,ndim,nparam):
    """Prior for pymultinest.

    Maps each element of cube from the unit interval onto that
    parameter's allowed range (taken from the module global, since
    pymultinest callbacks take no user arguments).
    """
    global global_p_rng
    ranges = global_p_rng
    for i in range(ndim):
        lo = ranges[i][0]
        hi = ranges[i][1]
        cube[i] = lo + cube[i] * (hi - lo)
def multinest_lnlike(cube,ndim,nparam):
    """Log likelihood callback for pymultinest.

    Copies the cube values into a numpy array and evaluates lnlike
    against the module-global observations and models.
    """
    global global_obs,global_mod
    param = np.array([cube[i] for i in range(ndim)])
    return lnlike(param, global_obs, global_mod)
def multinest(o,m,dir):
    """Run pymultinest to fit model(s) to photometry.

    Parameters
    ----------
    o : tuple
        Observations (Photometry/ObsSpectrum objects).
    m : tuple
        Models matching the observations (see residual()).
    dir : str
        Directory for the multinest output files.
    """
    # import here so the module works without pymultinest installed
    import pymultinest as pmn

    dir = dir.rstrip('/')
    m_info = model.models_info(m)
    pmn_out = dir+'/'+m_info['name']+cfg.fitting['pmn_model_suffix']

    # pymultinest callbacks take no user arguments, so pass the
    # observations, models, and parameter ranges via module globals
    global global_obs,global_mod,global_p_rng
    global_obs = o
    global_mod = m
    global_p_rng = m_info['p_rng']

    pmn.run(multinest_lnlike,multinest_prior,m_info['ndim'],
            n_live_points=cfg.fitting['n_live'],
            n_iter_before_update=cfg.fitting['n_update'],
            multimodal=True,sampling_efficiency=0.3,
            verbose=cfg.fitting['verb'],
            outputfiles_basename=pmn_out)
def pmn_models(dir):
    """Return the models fitted by multinest.

    Parameters
    ----------
    dir : str
        Directory containing multinest output files.

    Returns
    -------
    mbase : list of str
        Output file basenames (without the '.txt').
    models : tuple of list of str
        Model component names for each fitted model.
    """
    fs = glob.glob( dir + '/*' + cfg.fitting['pmn_model_suffix'] + '.txt' )
    models = ()
    mbase = []
    suffix = cfg.fitting['pmn_model_suffix']
    for f in fs:
        # bugfix: str.rstrip strips a *character set*, not a suffix, so
        # it could also eat trailing characters of the model name
        # (e.g. 'x' or 't'); remove the suffixes by slicing instead
        pmn_out = f[:-len('.txt')] if f.endswith('.txt') else f
        mbase.append(pmn_out)
        tmp = pmn_out[:-len(suffix)] if pmn_out.endswith(suffix) else pmn_out
        tmp = os.path.basename(tmp)
        models += (tmp.split(cfg.fitting['model_join']),)
    return mbase,models
def pmn_pc(prob,samples,pcs,axis=0):
    """Return numpy-like percentile for multinest output.

    Accounts for probability of samples, which must therefore be given.
    Mostly a copy from analyzer.get_stats() in pymultinest.

    Parameters
    ----------
    prob : array-like
        Probability (weight) of each sample.
    samples : array-like
        Samples, up to 3 dimensions; reduced recursively to 1d.
    pcs : int, float, or sequence
        Percentile(s) in the range 0-100.
    axis : int, optional
        Axis along which samples vary (must match the dimensionality).
    """
    # sort what to do depending on sample array dimension
    if np.ndim(samples) == 3:
        # loop over each column, recursing down to the 1d case
        if axis == 2:
            out = np.zeros((len(pcs),samples.shape[0],samples.shape[1]))
            for i in range(samples.shape[0]):
                for j in range(samples.shape[1]):
                    out[:,i,j] = pmn_pc(prob,samples[i,j,:],pcs)
            return out
        else:
            raise utils.SdfError("axis must be 2 for 3d samples")
    if np.ndim(samples) == 2:
        if axis == 1:
            return pmn_pc(prob,samples.T,pcs) # do for the transpose
        # loop over each column
        elif axis == 0:
            out = np.zeros((len(pcs),samples.shape[1]))
            for i in range(samples.shape[1]):
                out[:,i] = pmn_pc(prob,samples[:,i],pcs)
            return out
        else:
            raise utils.SdfError("axis must be 0 or 1")
    elif np.ndim(samples) > 2:
        raise utils.SdfError("can't do more than 2d")
    elif np.ndim(samples) == 0:
        raise utils.SdfError("need an array/list/tuple of values")
    else:
        # 1d base case
        # organise and sort probabilities and samples
        if len(prob) != len(samples):
            raise utils.SdfError("prob and samples have different lengths"
                                 " {} and {}".format(len(prob),len(samples)))
        b = list(zip(prob,samples))
        b.sort(key=lambda x: x[1])  # sort pairs by sample value
        b = np.array(b)
        # cumulative probability gives the empirical weighted CDF
        b[:,0] = b[:,0].cumsum()
        # additional normalisation step, since expect to use sub-samples
        b[:,0] /= np.max(b[:,0])
        # interpolation function to get percentiles; clamp to the
        # extreme sample values outside the CDF range
        bi = lambda x: np.interp(x, b[:,0], b[:,1], left=b[0,1], right=b[-1,1])
        if isinstance(pcs,(int,float)):
            return bi(pcs/100.0)
        elif isinstance(pcs,(np.ndarray,list,tuple)):
            return np.array([bi(pc/100.0) for pc in pcs])
        else:
            raise utils.SdfError("wrong type {}".format(type(pcs)))
def weighted_dist(prob):
    """Select samples from distribution, accounting for weights.

    Returns boolean array with same length as input, with True
    indicating samples to keep (rejection sampling against uniform
    thresholds drawn up to the maximum probability).

    Parameters
    ----------
    prob : np.array
        Array of probabilities
    """
    thresholds = np.random.uniform(high=np.max(prob), size=len(prob))
    return prob > thresholds
def sort_evidence(ev_in,ndim):
    """Return argsort for models using evidence.

    For a model with more parameters to be preferred, the log evidence
    must be more than ev_threshold higher (just higher if the number
    of dimensions is the same). Method is to initially sort models by
    number of dimensions, then repeatedly bubble better models up
    until the order is stable.

    Parameters
    ----------
    ev_in : list or np.array
        Log evidence of each model.
    ndim : list or np.array
        Number of dimensions of each model.
    """
    if len(ev_in) != len(ndim):
        raise utils.SdfError("length of ev_in ({}) and ndim ({}) not equal".\
                             format(ev_in,ndim))
    if isinstance(ev_in,list):
        ev_in = np.array(ev_in)
    if isinstance(ndim,list):
        ndim = np.array(ndim)

    # start ordered by increasing dimensionality
    order = np.array(np.argsort(ndim))
    last = np.zeros(len(order))
    # bubble until a full pass makes no swaps
    while not np.all( np.equal(order,last) ):
        last = np.array(order)
        for i in range(len(ndim)-1):
            # swap if the later model beats the earlier one: by more
            # than the threshold, or at all when dimensions are equal
            # (note: `and` binds tighter than `or`)
            if ev_in[order][i+1] > ev_in[order][i]+cfg.fitting['ev_threshold']\
                    or ndim[order][i+1] == ndim[order][i]\
                    and ev_in[order][i+1] > ev_in[order][i]:
                order[i+1], order[i] = order[i], order[i+1]
    return order
def emcee_prior(param,m):
    """Flat prior: 0 inside the model's allowed ranges, -inf outside."""
    m_info = model.models_info(m)
    for value, rng in zip(param, m_info['p_rng']):
        # keep the original comparison form so NaN passes through
        if value < rng[0] or value > rng[1]:
            return -np.inf
    return 0.0
def run_emcee(r,nwalkers=8,nstep=100,start_pos=None):
    """Run emcee MCMC fitting, starting near the best multinest fit."""
    if r.models == '':
        raise utils.SdfError('result.models empty, use sdf.model.get_models()')

    ndim = r.model_info['ndim']
    if start_pos is None:
        # scatter walkers around the best fit by the 1-sigma widths
        start_pos = [
            r.best_params + r.best_params_1sig*np.random.normal(size=ndim)
            for _ in range(nwalkers)
        ]

    def emcee_lnlike(param,*args):
        o,m = args
        return emcee_prior(param,m) + lnlike(param,*args)

    sampler = emcee.EnsembleSampler(nwalkers, ndim, emcee_lnlike,
                                    args=(r.obs,r.models))
    pos,lnprob,rstate = sampler.run_mcmc(start_pos, nstep)
    return pos,sampler
def find_outliers(r, max_wav=3):
    '''Find probable outliers after fitting.

    Loops through observations shorter than some wavelength, ignoring
    each point, rescaling the model without this point, and computes
    the chi^2. Tests whether the lowest point is significantly
    different to the others.

    Parameters
    ----------
    r : Result
        Fitting result with obs/model fluxes filled in.
    max_wav : float, optional
        Only consider filters with mean wavelength below this (micron).
    '''
    ok = (filter.mean_wavelength(r.filters) < max_wav) & \
        np.invert(r.filters_ignore)
    chi_ref = np.sum( ((r.obs_fnujy[ok] - r.model_fnujy[ok])/r.obs_e_fnujy[ok])**2 )
    dchi = np.zeros(len(r.obs_fnujy))
    for i in range(len(dchi)):
        if not ok[i]:
            continue
        # leave point i out, rescale the model to the remaining points
        ok_tmp = ok.copy()
        ok_tmp[i] = False
        model_tmp = r.model_fnujy * np.mean(r.obs_fnujy[ok_tmp]) / np.mean(r.model_fnujy[ok_tmp])
        dchi[i] = np.sum( ((r.obs_fnujy[ok_tmp] - model_tmp[ok_tmp])/r.obs_e_fnujy[ok_tmp])**2 )

    # bugfix: np.argmin(dchi[ok]) indexes the *compressed* ok-only
    # array; map it back to a full-length index before using it
    # against r.filters and dchi
    mini = np.where(ok)[0][np.argmin(dchi[ok])]
    print('most likely outlier: {}: {}'.format(mini, r.filters[mini]))
    ok[mini] = False
    print('{:f} vs. range of {:f} to {:f}'.format(dchi[mini],
                                                  np.min(dchi[ok]),
                                                  np.max(dchi[ok])))
    return chi_ref, dchi
| {
"repo_name": "drgmk/sdf",
"path": "sdf/fitting.py",
"copies": "1",
"size": "21568",
"license": "mit",
"hash": -6115587222771727000,
"line_mean": 30.7176470588,
"line_max": 104,
"alpha_frac": 0.5699183976,
"autogenerated": false,
"ratio": 3.447019338341058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437144826490593,
"avg_score": 0.015958581890092943,
"num_lines": 680
} |
from functools import lru_cache
import os
import rethinkdb as r
from sondra.document.schema_parser import ValueHandler
try:
from werkzeug.utils import secure_filename
except ImportError:
import re
def secure_filename(name):
name = re.sub(r'\s+', '-', name) # Replace white space with dash
name = name.sub(r'([a-zA-Z]):\\', '')
return name.sub(r'[^a-zA-Z0-9\-.]+', '_', name) # Replace non alphanumerics with a single _
def _strip_slashes(p):
p = p[1:] if p.startswith('/') else p
return p[:-1] if p.endswith('/') else p
def _join_components(*paths):
    """Join URL path components with '/', trimming one slash per end."""
    trimmed = [_strip_slashes(p) for p in paths]
    return '/'.join(trimmed)
class FileHandler(ValueHandler):
    """Value handler that stores file-like values via a storage service.

    Stream values (anything with a ``read`` attribute) are handed to the
    storage service and replaced with their public URL; other values are
    passed through to the base handler.
    """

    def __init__(self, storage_service, key, content_type='application/octet-stream'):
        self._storage_service = storage_service
        self._key = key
        self._content_type = content_type

    def _store(self, value, document):
        # hand a readable stream to the storage service, returning its URL
        return self._storage_service.store(
            document=document,
            key=self._key,
            original_filename=getattr(value, "filename", "uploaded-file.dat"),
            content_type=self._content_type,
            stream=value
        )

    def post_save(self, document):
        # link the stored file record to the now-saved document
        self._storage_service.assoc(document, document.obj[self._key])

    def to_json_repr(self, value, document):
        if hasattr(value, 'read'):
            return self._store(value, document)
        return super().to_json_repr(value, document)

    def pre_delete(self, document):
        self._storage_service.delete_for_document(document)

    def to_python_repr(self, value, document):
        return self._storage_service.stream(value)

    def to_rql_repr(self, value, document):
        if hasattr(value, 'read'):
            return self._store(value, document)
        return super().to_rql_repr(value, document)
class FileStorageDefaults(object):
    """Suite mixin for suite containing defaults for file storage"""
    # URL path component under which stored files are served
    media_url_path = "media"
class FileStorageService(object):
    """Base class mapping document file fields to stored blobs.

    Keeps a per-collection bookkeeping table in RethinkDB; concrete
    subclasses implement the byte storage itself (stream_file,
    store_file, delete_file).
    """

    def __init__(self):
        self._suite = None
        self._media_url = None
        self._path_start = None

    def _db(self, collection):
        return r.db(collection.application.db)

    def _conn(self, collection):
        return collection.application.connection

    # NOTE(review): lru_cache on instance methods keeps self alive for
    # the cache's lifetime; acceptable if this service is a long-lived
    # singleton — confirm before reuse elsewhere.
    @lru_cache()
    def _table_name(self, collection):
        return "_sondra_files__{collection}".format(collection=collection.name)

    @lru_cache()
    def _table(self, collection):
        """Return the bookkeeping table, creating it on first use."""
        db = self._db(collection)
        conn = self._conn(collection)
        table_name = self._table_name(collection)
        table = db.table(table_name)
        all_tables = { name for name in db.table_list().run(conn) }
        if table_name not in all_tables:
            db.table_create(table_name).run(conn)
            table.index_create('document').run(conn)
            table.index_create('collection').run(conn)
        return table

    def connect(self, suite):
        """Bind to a suite and compute the media URL prefix."""
        self._suite = suite
        host = "{scheme}://{netloc}".format(
            scheme=suite.base_url_scheme, netloc=suite.base_url_netloc)
        self._media_url = _join_components(host, suite.media_url_path)
        self._path_start = len(self._media_url) + 1

    def assoc(self, document, url):
        """Associate a stored file (by its URL) with its owning document."""
        app, coll, pk_ext = url[self._path_start:].split('/', 2)
        pk, ext = os.path.splitext(pk_ext)
        self._table(document.collection).get(pk).update({"document": document.id}).run(self._conn(document.collection))

    def store(self, document, key, original_filename, content_type, stream):
        """Record and store an uploaded stream; return its public URL."""
        collection = document.collection
        # replace any file previously stored under this key
        if document.id is not None:
            self.delete_for_document(document, key)
        _, filename = os.path.split(original_filename)
        _, extension = os.path.splitext(filename)
        result = self._table(collection).insert({
            "collection": collection.name,
            "document": None,  # filled in by assoc() after the save
            "key": key,
            "original_filename": filename,
            "extension": extension,
            "content_type": content_type,
        }).run(self._conn(collection))
        new_filename = "{id}{ext}".format(id=result['generated_keys'][0], ext=extension)
        self.store_file(collection, new_filename, stream)
        return "{media_url}/{app}/{coll}/{new_filename}".format(
            media_url=self._media_url,
            app=collection.application.slug,
            coll=collection.slug,
            new_filename=new_filename
        )

    def stream_file(self, collection, ident_ext):
        raise NotImplementedError("Implement stream_file in a concrete class")

    def store_file(self, collection, ident_ext, stream):
        raise NotImplementedError("Implement store_stream in an concrete class")

    def delete_file(self, collection, ident_ext):
        raise NotImplementedError("Implement delete_file in a concrete class")

    def delete_from_collection(self, collection, ident):
        """Delete a stored file and its bookkeeping record by id."""
        self.delete_file(collection, ident)
        # bugfix: was .get(id) — the builtin function, not the ident
        # argument — and .run(self._conn), passing the unbound method
        # instead of an open connection
        self._table(collection).get(ident).delete().run(self._conn(collection))

    def delete_for_document(self, document, key=None):
        """Delete stored files for a document (optionally just one key)."""
        # NOTE(review): assoc() stores document.id in the 'document'
        # index, but these queries pass the document object itself —
        # confirm whether document.id is needed here.
        if key is not None:
            existing = self._table(document.collection)\
                .get_all(document, index='document')\
                .filter({'key': key})\
                .run(self._conn(document.collection))
            for f in existing:  # should only be one
                self.delete_file(document.collection, f['id'] + f['extension'])
        else:
            self._table(document.collection)\
                .get_all(document, index='document')\
                .delete()\
                .run(self._conn(document.collection))

    def stream(self, url):
        """Resolve a media URL to its readable stream and metadata."""
        app, coll, pk = url[self._path_start:].split('/', 2)
        pk, ext = os.path.splitext(pk)
        collection = self._suite[app][coll]
        record = self._table(collection).get(pk).run(self._conn(collection))
        in_stream = self.stream_file(collection, pk + ext)
        return {
            "content_type": record['content_type'],
            "filename": record['original_filename'],
            "stream": in_stream
        }
class LocalFileStorageDefaults(FileStorageDefaults):
    """Suite mixin for local file storage defaults"""
    # default on-disk location for stored files
    media_path = os.path.join(os.getcwd(), "_media")
    # mode used when creating the media directory tree
    media_path_permissions = 0o755
    # bytes read per iteration when copying upload streams to disk
    chunk_size = 16384
class LocalFileStorageService(FileStorageService):
    """File storage backed by a directory on the local filesystem."""

    def __init__(self):
        super(LocalFileStorageService, self).__init__()
        self._root = None

    def connect(self, suite):
        """Bind to the suite and ensure the media root exists."""
        super(LocalFileStorageService, self).connect(suite)
        self._root = suite.media_path \
            if suite.media_path.startswith('/') \
            else os.path.join(os.getcwd(), suite.media_path)
        os.makedirs(self._root, self._suite.media_path_permissions, exist_ok=True)

    def _path(self, collection, make=False):
        """Directory for a collection's files, optionally creating it."""
        p = os.path.join(self._root, collection.application.slug, collection.slug)
        if make:
            os.makedirs(p, exist_ok=True)
        return p

    def stream_file(self, collection, ident_ext):
        # bugfix: open in binary mode — stored content is arbitrary
        # bytes (content_type defaults to application/octet-stream)
        return open(os.path.join(self._path(collection), ident_ext), 'rb')

    def delete_file(self, collection, ident_ext):
        os.unlink(os.path.join(self._path(collection), ident_ext))

    def store_file(self, collection, ident_ext, stream):
        """Copy an upload stream to disk in fixed-size chunks."""
        p = self._path(collection, True)
        dest = os.path.join(p, ident_ext)
        # bugfix: upload streams yield bytes; the original text-mode
        # 'w' open would raise TypeError on the first binary write
        with open(dest, 'wb') as out:
            chunk = stream.read(self._suite.chunk_size)
            while chunk:
                out.write(chunk)
                chunk = stream.read(self._suite.chunk_size)
            out.flush()
| {
"repo_name": "JeffHeard/sondra",
"path": "sondra/files.py",
"copies": "1",
"size": "7919",
"license": "apache-2.0",
"hash": 7843037000418739000,
"line_mean": 33.5807860262,
"line_max": 119,
"alpha_frac": 0.6036115671,
"autogenerated": false,
"ratio": 3.9555444555444557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059156022644455,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import os.path
import pickle
import glob
import time
import json
import numpy as np
from scipy.stats import truncnorm
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import corner
import astropy.units as u
from . import photometry
from . import spectrum
from . import model
from . import filter
from . import fitting
from . import plotting
from . import utils
from . import config as cfg
class BaseResult(object):
"""Basic class to compute and handle fitting results."""
@lru_cache(maxsize=128)
def __init__(self,rawphot,model_comps):
"""Basic instantiation of the Result object."""
self.file_info(rawphot,model_comps)
@lru_cache(maxsize=128)
def file_info(self,rawphot,model_comps):
"""Basic file info."""
# component info
self.model_comps = model_comps
self.star_or_disk = ()
for comp in model_comps:
if comp in cfg.models['star']:
self.star_or_disk += ('star',)
elif comp in cfg.models['disk']:
self.star_or_disk += ('disk',)
else:
raise utils.SdfError("couldn't assigm comp {} to star or disk "
"given lists in {} and {}".
format(comp,cfg.models['star'],
cfg.models['disk']))
self.n_comps = len(model_comps)
# where the rawphot file is
self.rawphot = rawphot
self.path = os.path.dirname(rawphot)
# id
self.id = os.path.basename(rawphot).rstrip('-rawphot.txt')
# where the multinest output is (or will be), create if needed
self.pmn_dir = self.path + '/' + self.id \
+ cfg.fitting['pmn_dir_suffix']
if not os.path.exists(self.pmn_dir):
os.mkdir(self.pmn_dir)
# the base name for multinest files
self.pmn_base = self.pmn_dir + '/' \
+ cfg.fitting['model_join'].join(self.model_comps) \
+ cfg.fitting['pmn_model_suffix']
# plot names, pickle, and json, files may not exist yet
self.corner_plot = self.pmn_base+'corner.png'
self.distributions_plot = self.pmn_base+'distributions.png'
self.sed_thumb = self.pmn_base + 'sed_thumb.png'
self.pickle = self.pmn_base + '.pkl'
self.json = self.pmn_base + '.json'
# rawphot modification time
self.rawphot_time = os.path.getmtime(self.rawphot)
    def fill_data_models(self,nospec=False):
        """Get photometry/spectra and the models needed to fit these.

        Read in the observations and corresponding models. We must do
        this since models are not saved in the pickle to save space.

        Parameters
        ----------
        nospec : bool, optional
            Don't include any spectra when reading in the observations.
        """
        # observations; keywords, tuples of photometry and spectra. if
        # there is nothing in the photometry file then don't fill
        # anything else
        p = photometry.Photometry.read_sdb_file(self.rawphot)
        if p is None:
            return
        elif np.sum(p.ignore) == p.nphot:
            # every point flagged as ignored: nothing usable
            return

        self.obs = (p,)
        self.obs_keywords = utils.get_sdb_keywords(self.rawphot)
        self.exclude_spectra = nospec
        if not nospec:
            s = spectrum.ObsSpectrum.read_sdb_file(self.rawphot,
                                                   module_split=True,
                                                   nspec=1)
            if s is not None:
                self.obs = (p,) + s

        # models
        mod,plmod = model.get_models(self.obs,self.model_comps)
        self.models = mod          # fitting models (with colour bases)
        self.pl_models = plmod     # plotting models (full spectra)
        self.model_info = model.models_info(self.models)
    def fill_observations(self):
        """Fill observed attributes.

        Returns obs_nel, needed to compute model fluxes.
        """
        # this is largely copied from fitting.residual
        tmp = fitting.concat_obs(self.obs)
        self.obs_fnujy,self.obs_e_fnujy,self.obs_upperlim,self.filters_ignore,\
            obs_ispec,obs_nel,self.wavelengths,self.filters,self.obs_bibcode = tmp
        # spectra normalisations sit at the end of best_params; negative
        # ispec values index them from the end, with the appended 1.0
        # picked up (index -1) for photometry points
        # NOTE(review): assumes best_params is a plain list — confirm
        spec_norm = np.take(self.best_params+[1.0],obs_ispec)
        self.obs_fnujy = self.obs_fnujy * spec_norm
        self.obs_e_fnujy = self.obs_e_fnujy * spec_norm
        return obs_nel
    def fill_best_fit_spectra(self):
        """Fill best fit model spectra.

        Builds per-component spectra at their native resolution, plus
        total/star/disk spectra on the default wavelength grid.

        .. todo:: resample spectrum rather than interpolate model
        """
        # ObsSpectrum for each component, at original resolution
        wave = cfg.models['default_wave']
        star_spec = np.zeros(len(wave))
        disk_spec = np.zeros(len(wave))
        total_spec = np.zeros(len(wave))
        self.comp_spectra = ()
        for i,comp in enumerate(self.pl_models):
            for mtmp in comp:
                if not isinstance(mtmp,model.SpecModel):
                    continue
                # copy so interp_to_wavelengths doesn't mutate pl_models
                m = mtmp.copy()
                s = spectrum.ObsSpectrum(wavelength=m.wavelength,
                                         fnujy=m.fnujy(self.comp_best_params[i]))
                s.fill_irradiance()
                self.comp_spectra += (s,)
                # TODO: resample spectrum rather than interpolate model
                m.interp_to_wavelengths(wave)
                total_spec += m.fnujy(self.comp_best_params[i])
                if self.star_or_disk[i] == 'star':
                    star_spec += m.fnujy(self.comp_best_params[i])
                elif self.star_or_disk[i] == 'disk':
                    disk_spec += m.fnujy(self.comp_best_params[i])

        # and total/star/disk spectra, at common wavelengths; star/disk
        # are None when no component of that kind contributed flux
        self.total_spec = spectrum.ObsSpectrum(wavelength=wave,fnujy=total_spec)
        if np.max(star_spec) > 0:
            self.star_spec = spectrum.ObsSpectrum(wavelength=wave,fnujy=star_spec)
        else:
            self.star_spec = None
        if np.max(disk_spec) > 0:
            self.disk_spec = spectrum.ObsSpectrum(wavelength=wave,fnujy=disk_spec)
        else:
            self.disk_spec = None
def main_results_text(self):
"""Return nicely formatted tuple of text of results."""
# the array sets the order, and the dict the conversion
text_ord = ['Teff','lstar','rstar',
'Temp','rdisk_bb','ldisk_lstar',
'lam0','beta','Dmin','q']
text_sub = {'Teff': ['T<sub>star</sub>','K'],
'MH': ['[M/H]',''],
'logg': ['logg',''],
'lstar':['L<sub>star</sub>','L<sub>Sun</sub>'],
'rstar':['R<sub>star</sub>','R<sub>Sun</sub>'],
'Temp': ['T<sub>dust</sub>','K'],
'lam0': ['λ<sub>0</sub>','μm'],
'beta': ['β',''],
'Dmin': ['D<sub>min</sub>','μm'],
'q': ['q',''],
'ldisk_lstar':['L<sub>disk</sub>/L<sub>star</sub>',''],
'rdisk_bb':['R<sub>BB</sub>','au']}
text = ()
for res in self.main_results:
string = ''
i = 0
for par in text_ord:
if par in res.keys():
unc,meas = utils.rnd1sf([res['e_'+par],res[par]])
if i > 0:
string += ' , '
string += '{} = {:g} ± {:g} {}'.format(text_sub[par][0],meas,unc,text_sub[par][1])
i += 1
text = text + (string,)
return text
    def basic_results(self):
        """A dictionary of basic results, unreliant on classes.

        Everything is converted to plain lists/dicts so the result can
        be serialised (e.g. to json).
        """
        r = {}
        # some info
        r['id'] = self.id
        r['write_time'] = time.time()
        r['model_comps'] = self.model_comps
        r['main_results'] = self.main_results
        r['parameters'] = self.parameters
        r['best_params'] = self.best_params
        r['best_params_1sig'] = self.best_params_1sig
        r['chisq'] = self.chisq
        # observed photometry
        r['phot_band'] = []
        r['phot_wavelength'] = []
        r['phot_fnujy'] = []
        r['phot_e_fnujy'] = []
        r['phot_upperlim'] = []
        r['phot_ignore'] = []
        for p in self.obs:
            if not isinstance(p,photometry.Photometry):
                continue
            r['phot_band'].append(p.filters.tolist())
            r['phot_wavelength'].append(p.mean_wavelength().tolist())
            r['phot_fnujy'].append(p.fnujy.tolist())
            r['phot_e_fnujy'].append(p.e_fnujy.tolist())
            r['phot_upperlim'].append(p.upperlim.tolist())
            r['phot_ignore'].append(p.ignore.tolist())
        # model photometry at observed wavelengths
        # first select filters and colours (spectra have None for filter)
        filt = np.array([isinstance(f,(str,np.str_)) for f in self.filters])
        # per-component fluxes and errors
        r['model_comp_fnujy'] = self.model_comp_fnujy[:,filt].tolist()
        avg_err = ( self.model_comp_fnujy_1sig_lo[:,filt] +
                    self.model_comp_fnujy_1sig_hi[:,filt] ) / 2.0
        r['model_comp_fnujy_1sig'] = avg_err.tolist()
        # full models
        r['model_total_fnujy'] = self.model_fnujy[filt].tolist()
        avg_err = ( self.model_fnujy_1sig_lo[filt] +
                    self.model_fnujy_1sig_hi[filt] ) / 2.0
        r['model_total_fnujy_1sig'] = avg_err.tolist()
        # observed spectra; spectrum normalisations sit at the end of
        # best_params, hence the negative index (see fill_observations)
        ispec = -1
        r['spectra'] = []
        for s in self.obs:
            if not isinstance(s,spectrum.ObsSpectrum):
                continue
            t = {}
            t['wavelength'] = s.wavelength.tolist()
            t['fnujy'] = (s.fnujy * self.best_params[ispec]).tolist()
            t['e_fnujy'] = (s.e_fnujy * self.best_params[ispec]).tolist()
            ispec -= 1
            r['spectra'].append(t)
        # spectra of each model component
        r['model_spectra'] = []
        for s in self.comp_spectra:
            r['model_spectra'].append({'wavelength': s.wavelength.tolist(),
                                       'fnujy': s.fnujy.tolist()})
        # total spectra for "star" and "disk" components
        if self.star_spec is not None:
            r['star_spec'] = {}
            r['star_spec']['wavelength'] = self.star_spec.wavelength.tolist()
            r['star_spec']['fnujy'] = self.star_spec.fnujy.tolist()
        if self.disk_spec is not None:
            r['disk_spec'] = {}
            r['disk_spec']['wavelength'] = self.disk_spec.wavelength.tolist()
            r['disk_spec']['fnujy'] = self.disk_spec.fnujy.tolist()
        return r
def pickle_output(self):
"""Pickle the results for later."""
# see if we need to write
if hasattr(self,'pickle_time'):
if self.analysis_time < self.pickle_time:
return
# save for later in a pickle, updating the pickle_time to now
self.pickle_time = time.time()
with open(self.pickle,'wb') as f:
pickle.dump(self,f)
def load(file):
"""Load a previously save result pickle."""
with open(file,'rb') as f:
r = pickle.load(f)
# reload the necessary models (that weren't saved)
r.models,r.pl_models = model.get_models(r.obs,r.model_comps)
return r
def sed_thumbnail(self,file=None,update=False):
"""Generate a thumbnail of the SED."""
if file is None:
file = self.sed_thumb
if os.path.exists(file) and not update:
if os.path.getmtime(file) > self.pickle_time:
return
plotting.quick_sed(self,file=file,axis_labels=False,dpi=40)
def write_json(self,update=False):
"""Write basic results as a json file."""
if os.path.exists(self.json) and not update:
if os.path.getmtime(self.json) > self.pickle_time:
return
with open(self.json,'w') as f:
json.dump(self.basic_results(),f)
class FixedResult(BaseResult):
    """Class for unfitted (i.e. specified) models.

    All model parameters except the normalisations are supplied by the
    caller; only the normalisations are fitted (by chi-squared
    minimisation).
    """

    def __init__(self,rawphot,model_comps,parameters,
                 nospec=False):
        """Do everything at initialisation.

        Parameters
        ----------
        rawphot : string
            Name of rawphot file.
        model_comps : tuple of strings
            Models to use.
        parameters : tuple of lists
            Parameters for each model component, excluding normalisation.
        nospec : bool, optional
            Exclude spectra from fitting.
        """
        self.file_info(rawphot,model_comps)
        self.fill_data_models(nospec=nospec)

        # reorganise models for zero dimensions; with the parameters
        # fixed each component collapses to normalisation only
        mod = ()
        for mod1,param in zip(self.models,parameters):
            mod_tmp = ()
            for mod_comp in mod1:
                mod_tmp += (model.reduce_zerod(mod_comp,param),)
            mod += (mod_tmp,)

        pl_mod = ()
        for pl1,param in zip(self.pl_models,parameters):
            pl_tmp = ()
            for pl_comp in pl1:
                pl_tmp += (model.reduce_zerod(pl_comp,param),)
            pl_mod += (pl_tmp,)

        self.models = mod
        self.pl_models = pl_mod
        self.model_info = model.models_info(self.models)

        # find best fit normalisation
        res = minimize(fitting.chisq,
                       np.ones(self.model_info['ndim']),
                       args=(self.obs,self.models))

        # fill model parameters; each component has a single (scalar)
        # normalisation parameter
        self.parameters = self.model_info['parameters']
        self.best_params = res['x']
        self.comp_best_params = ()
        for param in self.best_params:
            self.comp_best_params += ([param],)

        # fill with post-processing steps
        obs_nel = self.fill_observations()

        # get model fluxes and spectra
        self.model_fnujy,model_comp_fnujy = \
            model.model_fluxes(self.models,self.best_params,obs_nel)
        self.residuals,_,_ = fitting.residual(self.best_params,
                                              self.obs,self.models)
        self.fill_best_fit_spectra()
class SampledResult(BaseResult):
    """Class for results from Monte-Carlo or other sampling methods.

    Assumes that samples have uniform weights, e.g. from MCMC, or
    MultiNest's post_equal_weights.dat.
    """

    def star_results(self):
        """Return tuple of dicts of star-specifics, if result has star."""
        star = ()
        distributions = ()
        for i,comp in enumerate(self.model_comps):
            if comp in cfg.models['star']:
                star_one, dist_one = self.star_results_one(i)
                star = star + (star_one,)
                distributions = distributions + (dist_one,)
        return star,distributions

    def star_results_one(self,i):
        """Return dict of star-specifics for ith model component.

        Also returns a dict of the corresponding sample distributions.

        .. todo:: make stellar parameters consistent, e.g. re-compute
        lstar from rstar and teff if we have a distance
        """
        star = {}
        star['comp_no'] = i
        distributions = {}
        # copy best-fit values/uncertainties of all fitted parameters
        for j,par in enumerate(self.comp_parameters[i]):
            star[par] = self.comp_best_params[i][j]
            star['e_'+par] = self.comp_best_params_1sig[i][j]

        # rstar and lstar if star were at 1pc; assumes the last sampled
        # parameter is the log solid angle and the first is Teff
        # (TODO confirm against the star model definitions)
        rstar_1pc_dist = np.sqrt(cfg.ssr * 10**self.comp_param_samples[i][:,-1]/np.pi) \
                         * u.pc.to(u.m)
        lstar_1pc_dist = 4 * np.pi * rstar_1pc_dist**2 \
                         * self.comp_param_samples[i][:,0]**4 \
                         * 5.670373e-08 / u.L_sun.to(u.W)
        distributions['lstar_1pc'] = lstar_1pc_dist
        # accumulate total stellar luminosity across star components
        self.distributions['lstar_1pc_tot'] += lstar_1pc_dist
        lo,star['lstar_1pc'],hi = np.percentile(lstar_1pc_dist,[16.0,50.0,84.0])
        star['e_lstar_1pc_lo'] = star['lstar_1pc'] - lo
        star['e_lstar_1pc_hi'] = hi - star['lstar_1pc']
        star['e_lstar_1pc'] = (star['e_lstar_1pc_lo']+star['e_lstar_1pc_hi'])/2.0

        # distance-dependent params, only available with a parallax
        if 'parallax' in self.distributions.keys():
            star['plx_arcsec'] = self.obs_keywords['plx_value'] / 1e3
            star['e_plx_arcsec'] = self.obs_keywords['plx_err'] / 1e3

            # combine lstar_1pc and plx distributions for lstar
            lstar_dist = lstar_1pc_dist / self.distributions['parallax']**2
            distributions['lstar'] = lstar_dist
            lo,star['lstar'],hi = np.percentile(lstar_dist,[16.0,50.0,84.0])
            star['e_lstar_lo'] = star['lstar'] - lo
            star['e_lstar_hi'] = hi - star['lstar']
            star['e_lstar'] = (star['e_lstar_lo']+star['e_lstar_hi'])/2.0

            rstar_dist = rstar_1pc_dist / self.distributions['parallax'] / u.R_sun.to(u.m)
            distributions['rstar'] = rstar_dist
            lo,star['rstar'],hi = np.percentile(rstar_dist,[16.0,50.0,84.0])
            star['e_rstar_lo'] = star['rstar'] - lo
            star['e_rstar_hi'] = hi - star['rstar']
            star['e_rstar'] = (star['e_rstar_lo']+star['e_rstar_hi'])/2.0

        return (star,distributions)

    def disk_r_results(self):
        """Return tuple of dicts of disk-specifics, if result has disk_r."""
        disk_r = ()
        distributions = ()
        for i,comp in enumerate(self.model_comps):
            if comp in cfg.models['disk_r']:
                disk_r_one,dist_one = self.disk_r_results_one(i)
                disk_r = disk_r + (disk_r_one,)
                distributions = distributions + (dist_one,)
        return disk_r,distributions

    def disk_r_results_one(self,i):
        """Return dict of disk_r-specifics for ith model component.

        Also returns a dict of the corresponding sample distributions.
        """
        disk_r = {}
        disk_r['comp_no'] = i
        distributions = {}
        for j,par in enumerate(self.comp_parameters[i]):
            # parameters sampled in log get converted to linear
            if 'log_' in par:
                par_in = par.replace('log_','')
                disk_r[par_in] = 10**self.comp_best_params[i][j]
                disk_r['e_'+par_in] = (
                    10**(self.comp_best_params[i][j] + \
                         self.comp_best_params_1sig[i][j]) \
                    - \
                    10**(self.comp_best_params[i][j] - \
                         self.comp_best_params_1sig[i][j]) \
                    ) / 2.
            else:
                par_in = par
                disk_r[par_in] = self.comp_best_params[i][j]
                disk_r['e_'+par_in] = self.comp_best_params_1sig[i][j]

            # array of disk temperature samples
            if 'Temp' in par:
                if par == 'log_Temp':
                    distributions['tdisk'] = 10**self.comp_param_samples[i][:,j]
                elif par == 'Temp':
                    distributions['tdisk'] = self.comp_param_samples[i][:,j]

        # disk and fractional luminosity, one spectrum per sample
        ldisk_1pc_dist = np.zeros(self.n_samples)
        for j,par in enumerate(self.comp_param_samples[i]):
            # there will only be one SpecModel in the ith component
            for m in self.pl_models[i]:
                if not isinstance(m,model.SpecModel):
                    continue
                s = spectrum.ObsSpectrum(wavelength=m.wavelength,
                                         fnujy=m.fnujy(par))
                s.fill_irradiance()
                ldisk_1pc_dist[j] = s.irradiance \
                    * 4 * np.pi * (u.pc.to(u.m))**2 / u.L_sun.to(u.W)

        distributions['ldisk_1pc'] = ldisk_1pc_dist
        lo,disk_r['ldisk_1pc'],hi = np.percentile(ldisk_1pc_dist,[16.0,50.0,84.0])
        disk_r['e_ldisk_1pc_lo'] = disk_r['ldisk_1pc'] - lo
        disk_r['e_ldisk_1pc_hi'] = hi - disk_r['ldisk_1pc']
        disk_r['e_ldisk_1pc'] = (disk_r['e_ldisk_1pc_lo']+disk_r['e_ldisk_1pc_hi'])/2.0

        # stellar luminosities (if >1 star) were summed already
        if np.sum(self.distributions['lstar_1pc_tot']) > 0.0:
            ldisk_lstar_dist = ldisk_1pc_dist / self.distributions['lstar_1pc_tot']
            distributions['ldisk_lstar'] = ldisk_lstar_dist
            lo,disk_r['ldisk_lstar'],hi = np.percentile(ldisk_lstar_dist,
                                                        [16.0,50.0,84.0])
            disk_r['e_ldisk_lstar_lo'] = disk_r['ldisk_lstar'] - lo
            disk_r['e_ldisk_lstar_hi'] = hi - disk_r['ldisk_lstar']
            disk_r['e_ldisk_lstar'] = (disk_r['e_ldisk_lstar_lo']+
                                       disk_r['e_ldisk_lstar_hi'])/2.0

            # distance (and stellar L)-dependent params; presumably the
            # blackbody equilibrium radius in au (278.3 K at 1 au for
            # 1 L_sun) -- TODO confirm constant
            if 'parallax' in self.distributions.keys():
                lstar = self.distributions['lstar_1pc_tot'] / \
                        self.distributions['parallax']**2
                rdisk_bb_dist = lstar**0.5 * (278.3/distributions['tdisk'])**2
                distributions['rdisk_bb'] = rdisk_bb_dist
                lo,disk_r['rdisk_bb'],hi = np.percentile(rdisk_bb_dist,
                                                         [16.0,50.0,84.0])
                disk_r['e_rdisk_bb_lo'] = disk_r['rdisk_bb'] - lo
                disk_r['e_rdisk_bb_hi'] = hi - disk_r['rdisk_bb']
                disk_r['e_rdisk_bb'] = (disk_r['e_rdisk_bb_lo']+disk_r['e_rdisk_bb_hi'])/2.0

        return (disk_r,distributions)
class Result(SampledResult):
    """Class to compute and handle multinest results.

    .. todo:: change this to MultinestResult, doing so will cause issues
    with loaded pickles, which will still be Result.
    """

    # acts as a cached factory: no self argument, called as
    # Result.get(rawphot, model_comps, ...); lru_cache keys on the
    # (hashable) arguments
    @lru_cache(maxsize=128)
    def get(rawphot,model_comps,update_mn=False,
            update_an=False,update_json=False,update_thumb=False,
            nospec=False):
        """Take photometry file and model_name, and fill the rest.

        The process works on a hierarchy of update times, each of which
        must be more recent than the previous, or a redo will be forced.

        rawphot_time < mn_time < mn_a_time < analysis_time < pickle_time

        These must also be more recent than the configuration variables
        for the oldest allowed analysis and multinest times.

        Each step can be called, and based on these the code will be
        (re)run or not. fill_data_models will always be run because the
        models are not saved in the pickle to save space.

        Parameters
        ----------
        rawphot : str
            Rawphot file from sdb.
        model_comps : tuple of str
            Tuple of model names to fit.
        update_mn : bool, optional
            Force update of multinest fitting.
        update_an : bool, optional
            Force update of post-multinest analysis.
        update_thumb : bool, optional
            Force update of thumbnail plot.
        update_json : bool, optional
            Force update of json file.
        nospec : bool, optional
            Exclude any spectra when observations are read in.

        See Also
        --------
        result.fill_data_models
        result.run_multinest
        result.run_analysis
        """
        self = Result(rawphot,model_comps)

        # see if we have a pickle of results already
        if os.path.exists(self.pickle):
            with open(self.pickle,'rb') as f:
                self = pickle.load(f)

            # update object with local file info since processing may have
            # been done elsewhere
            self.file_info(rawphot,model_comps)

            # see if we can skip everything except the json
            # NOTE(review): 'mn_time' and 'rawphot_time' are each checked
            # twice here; harmless but redundant
            if hasattr(self,'rawphot_time') and hasattr(self,'mn_time') and \
                    hasattr(self,'mn_a_time') and hasattr(self,'mn_time') and \
                    hasattr(self,'rawphot_time') and hasattr(self,'pickle_time'):
                # everything must be newer than its predecessor and than
                # the configured "oldest allowed" times, with the same
                # spectrum in/exclusion and sample count as before
                if not update_an and not update_mn and \
                        self.exclude_spectra == nospec and \
                        self.mn_time > cfg.fitting['mn_oldest'] and \
                        self.analysis_time > cfg.fitting['an_oldest'] and \
                        ( len(self.param_samples) == len(self.analyzer.get_equal_weighted_posterior()) or \
                          len(self.param_samples) == cfg.fitting['n_samples_max'] ) and \
                        self.pickle_time > self.analysis_time > self.mn_a_time > \
                        self.mn_time > self.rawphot_time:
                    self.sed_thumbnail(update=update_thumb)
                    self.write_json(update=update_json)
                    return self

        # a change in spectrum in/exclusion forces a multinest re-run
        mn_up = update_mn
        if hasattr(self,'exclude_spectra'):
            mn_up = (self.exclude_spectra != nospec or mn_up)

        # run everything, these will check if anything needs to be done
        self.fill_data_models(nospec=nospec)

        # stop if no photometry
        if not hasattr(self,'obs'):
            return self

        self.run_multinest(update_mn=mn_up)
        self.get_multinest_results(update_mn_a=update_an)
        self.run_analysis(update_an=update_an)

        # delete the models to save space, we don't need them again
        self.models = ''
        self.pl_models = ''
        self.pickle_output()
        self.sed_thumbnail(update=update_thumb)
        self.write_json(update=update_json)

        return self

    def run_multinest(self,update_mn=False):
        """Run multinest.

        Run multinest fitting, get the results as a pymultinest
        analyzer object, and make a corner plot of the points. The
        fitting will be run if it has never been run (no files or no
        mn_time attribute), is older than the read time of the
        photometry file, or is older than the mn_oldest time in
        config.fitting.

        All multinest files are updated, even if a previous run was
        used, so the file modification times are not useful for testing
        for whether an update is needed.

        Parameters
        ----------
        update_mn : bool, optional
            Force update of multinest fitting.
        """
        print("   multinest")
        import pymultinest as pmn

        # if we want to re-run multinest, delete previous output first
        run_mn = update_mn
        if os.path.exists(self.pmn_base+'phys_live.points'):
            if hasattr(self,'mn_time'):
                if self.rawphot_time > self.mn_time:
                    run_mn = True
                if cfg.fitting['mn_oldest'] > self.mn_time:
                    run_mn = True
        else:
            # no previous output, must run
            run_mn = True

        # check the number of live points in the previous run (one line
        # per live point in phys_live.points)
        npt = 0
        if os.path.exists(self.pmn_base+'phys_live.points'):
            with open(self.pmn_base+'phys_live.points') as f:
                for l in f: npt += 1

        # multinest does checkpointing, so we can force a re-run by
        # deleting the files
        if run_mn or npt != cfg.fitting['n_live']:
            print("     redoing")
            self.delete_multinest()

        # we must go there, multinest only takes 100 char paths
        with utils.pushd(self.pmn_dir):
            fitting.multinest( self.obs,self.models,'.' )

        self.mn_time = os.path.getmtime(self.pmn_base+'phys_live.points')

    def get_multinest_results(self,update_mn_a=False):
        '''Get results and samples from multinest.

        This could have been contained within run_analysis, so same
        update requirement as that routine.

        Parameters
        ----------
        update_mn_a : bool, optional
            Force update of multinest results.
        '''
        print("   getting analysis data")
        import pymultinest as pmn

        # update the analyzer if necessary
        get_a = update_mn_a
        if hasattr(self,'mn_a_time'):
            if self.mn_time > self.mn_a_time:
                get_a = True
            if cfg.fitting['an_oldest'] > self.mn_a_time:
                get_a = True
        else:
            get_a = True

        if not get_a:
            print("     skipping")
            return

        a = pmn.Analyzer(outputfiles_basename=self.pmn_base,
                         n_params=self.model_info['ndim'])
        self.analyzer = a

        # parameter names and best fit
        self.evidence = self.analyzer.get_stats()['global evidence']
        self.parameters = self.model_info['parameters']

        # equally weighted samples for distributions, last column is loglike
        self.param_samples = self.analyzer.get_equal_weighted_posterior()[:,:-1]
        # cap the number of samples kept
        if len(self.param_samples) > cfg.fitting['n_samples_max']:
            self.param_samples = self.param_samples[:cfg.fitting['n_samples_max']]

        self.n_samples = len(self.param_samples)

        # best fit parameters: median and half the 16-84% range
        lo, med, hi = np.percentile(self.param_samples,
                                    [16.0, 50.0, 84.0], axis=0)
        self.best_params = list(med)
        self.best_params_1sig = list((hi - lo)/2)

        # split the parameters into components; each component has its
        # model parameters plus one normalisation
        self.n_parameters = len(self.parameters)
        self.comp_best_params = ()
        self.comp_best_params_1sig = ()
        self.comp_parameters = ()
        self.comp_param_samples = ()
        i0 = 0
        for comp in self.models:
            nparam = len(comp[0].parameters)+1
            self.comp_parameters += (comp[0].parameters,)
            self.comp_best_params += (self.best_params[i0:i0+nparam],)
            self.comp_best_params_1sig += (self.best_params_1sig[i0:i0+nparam],)
            comp_i_samples = ()
            comp_i_samples = self.param_samples[:,i0:i0+nparam]
            self.comp_param_samples += (comp_i_samples,)
            i0 += nparam

        self.mn_a_time = time.time()

        # corner plot of parameters, remade when missing or stale
        # parameter fitting corner plot
        plot = False
        if not os.path.exists(self.corner_plot):
            plot = True
        else:
            if os.path.getmtime(self.corner_plot) < self.mn_a_time:
                plot = True

        if plot:
            fig = corner.corner(self.param_samples, show_titles=True,
                                labels=self.model_info['parameters'])
            fig.savefig(self.corner_plot)
            plt.close(fig) # not doing this causes an epic memory leak

    def run_analysis(self,update_an=False):
        """Run analysis of the multinest results.

        Run post-multinest fitting analysis. Will be run if forced to,
        or if the multinest analyzer time is more recent than the
        previous analysis time.

        .. todo:: in deriving model fluxes we have a choice; photometry
        for median model, or median photometry across possible models.
        These are different, particularly so when the best fit is very
        uncertain. The former is better for plotting, as the photometry
        will line up with the spectrum (which is the median), but the
        latter is better for getting the range of allowed fluxes.

        Parameters
        ----------
        update_an : bool, optional
            Force update of analysis
        """
        print("   analysis")

        # see if we have anything to do
        run_an = update_an
        if hasattr(self,'analysis_time'):
            if self.mn_a_time > self.analysis_time:
                run_an = True
            if cfg.fitting['an_oldest'] > self.analysis_time:
                run_an = True
        else:
            run_an = True

        if not run_an:
            print("     skipping")
            return

        # fluxes and uncertainties etc. using parameter samples
        self.distributions = {}

        # we will add to lstar at 1pc when getting star-specific results
        self.distributions['lstar_1pc_tot'] = np.zeros(self.n_samples)

        # generate a normal distribution of parallaxes, truncated to
        # contain no negative values, if there is an uncertainty
        if self.obs_keywords['plx_err'] is not None \
                and self.obs_keywords['plx_value'] is not None:
            if self.obs_keywords['plx_err'] > 0 \
                    and self.obs_keywords['plx_value'] > 0:
                # truncation point in units of sigma below the mean
                lo_cut = -1. * ( self.obs_keywords['plx_value'] / \
                                 self.obs_keywords['plx_err'] )
                self.distributions['parallax'] = \
                    truncnorm.rvs(lo_cut,np.inf,
                                  loc=self.obs_keywords['plx_value']/1e3,
                                  scale=self.obs_keywords['plx_err']/1e3,
                                  size=self.n_samples)

        # observed fluxes
        obs_nel = self.fill_observations()

        # model fluxes, including colours/indices
        model_dist = np.zeros((len(self.filters),self.n_samples))
        model_comp_dist = np.zeros((self.n_comps,len(self.filters),
                                    self.n_samples))

        # all fluxes, including colours/indices
        p_all = photometry.Photometry(filters=filter.Filter.all)
        self.all_filters = p_all.filters
        p_all_mod,_ = model.get_models((p_all,),self.model_comps)
        all_dist = np.zeros((p_all.nphot,self.n_samples))
        all_comp_dist = np.zeros((self.n_comps,p_all.nphot,
                                  self.n_samples))

        # one set of fluxes per posterior sample
        for i,par in enumerate(self.param_samples):
            # TODO: compute once, grab model fluxes from all fluxes...
            model_fnujy,model_comp_fnujy = \
                model.model_fluxes(self.models,par,obs_nel)
            model_dist[:,i] = model_fnujy
            model_comp_dist[:,:,i] = model_comp_fnujy
            all_fnujy,all_comp_fnujy = \
                model.model_fluxes(p_all_mod,par,[p_all.nphot])
            all_dist[:,i] = all_fnujy
            all_comp_dist[:,:,i] = all_comp_fnujy

        # summed model fluxes
        self.distributions['model_fnujy'] = model_dist
        lo,self.model_fnujy,hi = np.percentile(model_dist,
                                               [16.0,50.0,84.0], axis=1)
        self.model_fnujy_1sig_lo = self.model_fnujy - lo
        self.model_fnujy_1sig_hi = hi - self.model_fnujy

        # per-component model fluxes
        self.distributions['model_comp_fnujy'] = model_comp_dist
        lo,self.model_comp_fnujy,hi = np.percentile(model_comp_dist,
                                                    [16.0,50.0,84.0],
                                                    axis=2)
        self.model_comp_fnujy_1sig_lo = self.model_comp_fnujy - lo
        self.model_comp_fnujy_1sig_hi = hi - self.model_comp_fnujy

        # residuals and fitting results
        self.residuals,_,_ = fitting.residual(self.best_params,
                                              self.obs,self.models)
        self.chisq = np.sum( np.square( self.residuals ) )
        self.dof = len(self.wavelengths)-len(self.parameters)-1

        # star and disk photometry for all filters
        star_phot_dist = np.zeros((p_all.nphot,self.n_samples))
        disk_phot_dist = np.zeros((p_all.nphot,self.n_samples))
        for i,comp in enumerate(self.model_comps):
            if self.star_or_disk[i] == 'star':
                star_phot_dist += all_comp_dist[i,:,:]
            elif self.star_or_disk[i] == 'disk':
                disk_phot_dist += all_comp_dist[i,:,:]

        # star photometry in all filters (None if no star flux)
        self.distributions['star_phot'] = star_phot_dist
        lo,self.all_star_phot,hi = np.percentile(star_phot_dist,
                                                 [16.0,50.0,84.0],axis=1)
        self.all_star_phot_1sig_lo = self.all_star_phot - lo
        self.all_star_phot_1sig_hi = hi - self.all_star_phot
        if np.sum(self.all_star_phot) == 0:
            self.all_star_phot = None

        # disk photometry in all filters (None if no disk flux)
        self.distributions['disk_phot'] = disk_phot_dist
        lo,self.all_disk_phot,hi = np.percentile(disk_phot_dist,
                                                 [16.0,50.0,84.0],axis=1)
        self.all_disk_phot_1sig_lo = self.all_disk_phot - lo
        self.all_disk_phot_1sig_hi = hi - self.all_disk_phot
        if np.sum(self.all_disk_phot) == 0:
            self.all_disk_phot = None

        # component photometry in all filters
        self.distributions['all_comp_phot'] = all_comp_dist
        lo,self.all_comp_phot,hi = np.percentile(all_comp_dist,
                                                 [16.0,50.0,84.0],axis=2)
        self.all_comp_phot_1sig_lo = self.all_comp_phot - lo
        self.all_comp_phot_1sig_hi = hi - self.all_comp_phot

        # total photometry in all filters
        self.distributions['all_phot'] = all_dist
        lo,self.all_phot,hi = np.percentile(all_dist,
                                            [16.0,50.0,84.0], axis=1)
        self.all_phot_1sig_lo = self.all_phot - lo
        self.all_phot_1sig_hi = hi - self.all_phot

        # model-specifics, also combined into a single tuple
        self.star,self.star_distributions = self.star_results()
        self.disk_r,self.disk_r_distributions = self.disk_r_results()
        self.main_results = self.star + self.disk_r

        # best fit spectra
        self.fill_best_fit_spectra()

        # set analysis finish time
        self.analysis_time = time.time()

        # corner plot of distributions, join them together first
        plot = False
        if not os.path.exists(self.distributions_plot):
            plot = True
        else:
            if os.path.getmtime(self.distributions_plot) < self.analysis_time:
                plot = True

        if plot:
            # start with a dummy row of zeros, sliced off below
            samples = np.zeros(self.n_samples)
            labels = []
            if 'parallax' in self.distributions.keys():
                samples = np.vstack((samples,self.distributions['parallax']))
                labels.append('parallax')
            for dist in self.star_distributions:
                for key in dist.keys():
                    samples = np.vstack((samples,dist[key]))
                    labels.append(key)
            for dist in self.disk_r_distributions:
                for key in dist.keys():
                    samples = np.vstack((samples,dist[key]))
                    labels.append(key)
            samples = samples[1:]
            fig = corner.corner(samples.transpose(),
                                show_titles=True, labels=labels)
            fig.savefig(self.distributions_plot)
            plt.close(fig)

    def delete_multinest(self):
        """Delete multinest output so it can be run again."""
        fs = glob.glob(self.pmn_base+'*')
        for f in fs:
            os.remove(f)
def sort_results(results):
    """Return indices to sort a list of Result objects by evidence."""
    # collect the evidence and dimensionality of each fit, then let
    # fitting.sort_evidence do the ranking
    evidences = [r.evidence for r in results]
    ndims = [r.model_info['ndim'] for r in results]
    return fitting.sort_evidence(evidences, ndims)
| {
"repo_name": "drgmk/sdf",
"path": "sdf/result.py",
"copies": "1",
"size": "39493",
"license": "mit",
"hash": 1001727455177722800,
"line_mean": 37.1574879227,
"line_max": 109,
"alpha_frac": 0.5347276733,
"autogenerated": false,
"ratio": 3.6936962214739992,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4728423894773999,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import pandas as pd
import numpy as np
from collections import namedtuple
from pathlib import Path
import artistools
def parse_adata(fadata, phixsdict, ionlist):
    """Generate ions and their level lists from adata.txt."""
    firstlevelnumber = 1

    for line in fadata:
        if not line.strip():
            continue

        header = line.split()
        Z, ionstage, level_count = int(header[0]), int(header[1]), int(header[2])
        ionisation_energy_ev = float(header[3])

        if ionlist and (Z, ionstage) not in ionlist:
            # skip over the level lines of an unwanted ion
            for _ in range(level_count):
                fadata.readline()
            continue

        rows = []
        for levelindex in range(level_count):
            fields = fadata.readline().split()
            numberin = int(fields[0])
            # levels must be listed in order, starting at one
            assert levelindex == numberin - firstlevelnumber
            targets, phixstable = phixsdict.get((Z, ionstage, numberin), ([], []))
            rows.append((float(fields[1]), float(fields[2]), int(fields[3]),
                         fields[4].strip('\''), targets, phixstable))

        dflevels = pd.DataFrame(
            rows, columns=['energy_ev', 'g', 'transition_count',
                           'levelname', 'phixstargetlist', 'phixstable'])

        yield Z, ionstage, level_count, ionisation_energy_ev, dflevels
def parse_transitiondata(ftransitions, ionlist):
    """Generate (Z, ionstage, dftransitions) tuples from transitiondata.txt."""
    firstlevelnumber = 1

    for line in ftransitions:
        if not line.strip():
            continue

        header = line.split()
        Z, ionstage, transition_count = (
            int(header[0]), int(header[1]), int(header[2]))

        if ionlist and (Z, ionstage) not in ionlist:
            # skip over the transition lines of an unwanted ion
            for _ in range(transition_count):
                ftransitions.readline()
            continue

        rows = []
        for _ in range(transition_count):
            fields = ftransitions.readline().split()
            # convert one-based level numbers to zero-based indices
            rows.append((int(fields[0]) - firstlevelnumber,
                         int(fields[1]) - firstlevelnumber,
                         float(fields[2]), float(fields[3]),
                         int(fields[4]) == 1))

        yield Z, ionstage, pd.DataFrame(
            rows, columns=['lower', 'upper', 'A', 'collstr', 'forbidden'])
def parse_phixsdata(fphixs, ionlist):
    """Generate photoionisation cross section tables from phixsdata_v2.txt."""
    firstlevelnumber = 1
    nphixspoints = int(fphixs.readline())
    phixsnuincrement = float(fphixs.readline())

    # energy grid (in threshold units) shared by every table in the file
    xgrid = np.linspace(1.0, 1.0 + phixsnuincrement * (nphixspoints + 1),
                        num=nphixspoints + 1, endpoint=False)

    for line in fphixs:
        if not line.strip():
            continue

        header = line.split()
        Z = int(header[0])
        upperionstage = int(header[1])
        upperionlevel = int(header[2]) - firstlevelnumber
        lowerionstage = int(header[3])
        lowerionlevel = int(header[4]) - firstlevelnumber
        # threshold_ev = float(header[5])

        assert upperionstage == lowerionstage + 1

        if upperionlevel < 0:
            # a negative level flags an explicit list of target levels
            # with their fractions
            targetlist = []
            ntargets = int(fphixs.readline())
            for _ in range(ntargets):
                level, fraction = fphixs.readline().split()
                targetlist.append((int(level) - firstlevelnumber,
                                   float(fraction)))
        else:
            targetlist = [(upperionlevel, 1.0)]

        if ionlist and (Z, lowerionstage) not in ionlist:
            # skip over the cross section values of an unwanted ion
            for _ in range(nphixspoints):
                fphixs.readline()
            continue

        # values are stored in megabarns; convert to cm^2
        sigmas = [float(fphixs.readline()) * 1e-18
                  for _ in range(nphixspoints)]
        phixstable = np.array(list(zip(xgrid, sigmas)))

        yield (Z, upperionstage, upperionlevel, lowerionstage, lowerionlevel,
               targetlist, phixstable)
@lru_cache(maxsize=8)
def get_levels(modelpath, ionlist=None, get_transitions=False, get_photoionisations=False):
    """Return a list of lists of levels.

    Parameters
    ----------
    modelpath : str or Path
        Directory containing adata.txt (and optionally
        transitiondata.txt and phixsdata_v2.txt).
    ionlist : tuple of (Z, ionstage), optional
        Restrict reading to these ions (must be hashable for lru_cache).
    get_transitions : bool, optional
        Also read transitiondata.txt.
    get_photoionisations : bool, optional
        Also read phixsdata_v2.txt.

    Returns
    -------
    pandas.DataFrame
        One row per ion with fields Z, ion_stage, level_count, ion_pot,
        levels, transitions.
    """
    adatafilename = Path(modelpath, 'adata.txt')

    transitionsdict = {}
    if get_transitions:
        transition_filename = Path(modelpath, 'transitiondata.txt')

        # bug fix: wrap modelpath in Path() so a plain string also works
        # (previously called modelpath.parent directly, unlike the other
        # branches below)
        print(f'Reading {transition_filename.relative_to(Path(modelpath).parent)}')
        with artistools.zopen(transition_filename, 'rt') as ftransitions:
            transitionsdict = {
                (Z, ionstage): dftransitions
                for Z, ionstage, dftransitions in parse_transitiondata(ftransitions, ionlist)}

    phixsdict = {}
    if get_photoionisations:
        phixs_filename = Path(modelpath, 'phixsdata_v2.txt')

        print(f'Reading {phixs_filename.relative_to(Path(modelpath).parent)}')
        with artistools.zopen(phixs_filename, 'rt') as fphixs:
            for (Z, upperionstage, upperionlevel, lowerionstage,
                 lowerionlevel, phixstargetlist, phixstable) in parse_phixsdata(fphixs, ionlist):
                phixsdict[(Z, lowerionstage, lowerionlevel)] = (phixstargetlist, phixstable)

    level_lists = []
    iontuple = namedtuple('ion', 'Z ion_stage level_count ion_pot levels transitions')

    with artistools.zopen(adatafilename, 'rt') as fadata:
        print(f'Reading {adatafilename.relative_to(Path(modelpath).parent)}')

        for Z, ionstage, level_count, ionisation_energy_ev, dflevels in parse_adata(fadata, phixsdict, ionlist):
            # ions with no transition data get an empty DataFrame
            translist = transitionsdict.get((Z, ionstage), pd.DataFrame())
            level_lists.append(iontuple(Z, ionstage, level_count, ionisation_energy_ev, dflevels, translist))

    dfadata = pd.DataFrame(level_lists)
    return dfadata
def parse_recombratefile(frecomb):
    """Generate (Z, upper_ionstage, dataframe) tuples from recombrates.txt."""
    for line in frecomb:
        Z, upper_ionstage, t_count = (int(x) for x in line.split())

        arr_log10t, arr_rrc_low_n, arr_rrc_total = [], [], []
        for _ in range(t_count):
            log10t, rrc_low_n, rrc_total = (
                float(x) for x in frecomb.readline().split())
            arr_log10t.append(log10t)
            arr_rrc_low_n.append(rrc_low_n)
            arr_rrc_total.append(rrc_total)

        dfion = pd.DataFrame({'log10T_e': arr_log10t,
                              'rrc_low_n': arr_rrc_low_n,
                              'rrc_total': arr_rrc_total})
        # add the linear temperature column
        dfion.eval('T_e = 10 ** log10T_e', inplace=True)

        yield Z, upper_ionstage, dfion
@lru_cache(maxsize=4)
def get_ionrecombratecalibration(modelpath):
    """Read recombrates file."""
    # map (Z, upper_ionstage) -> recombination rate dataframe
    with open(Path(modelpath, 'recombrates.txt'), 'r') as frecomb:
        return {(Z, upper_ionstage): dfrrc
                for Z, upper_ionstage, dfrrc in parse_recombratefile(frecomb)}
return recombdata | {
"repo_name": "lukeshingles/artistools",
"path": "artistools/atomic/__init__.py",
"copies": "1",
"size": "6708",
"license": "mit",
"hash": -3191612066305810400,
"line_mean": 35.6612021858,
"line_max": 118,
"alpha_frac": 0.6042039356,
"autogenerated": false,
"ratio": 3.5586206896551724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46628246252551725,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import random
import re
from sqlalchemy import Column, String
from seabird.plugin import Plugin, CommandMixin
from .db import Base, DatabaseMixin
class Bleep(Base):
    """ORM row mapping a banned word to its suggested replacement."""
    __tablename__ = "bleep"

    # the word to be bleeped (stored lowercased by cmd_bleep)
    bad_word = Column(String, primary_key=True)
    # the suggested substitute word
    replacement = Column(String)
class BleepPlugin(Plugin, CommandMixin, DatabaseMixin):
    """Plugin that nags users who use configured bad words."""

    __disabled__ = True

    # reply templates; '{}' is filled with the suggested replacement
    REPLIES = [
        'Hey, watch your mouth! Say "{}" instead.',
        'Pottymouth! We say "{}" in this channel.',
        'Uh oh, you should really say "{}" instead.',
        'Time to put a quarter in the jar! You should really use "{}" instead.',
    ]

    # NOTE(review): lru_cache on an instance method keys on self and
    # keeps the instance alive for the cache's lifetime; presumably fine
    # for a long-lived singleton plugin -- confirm
    @lru_cache(maxsize=1)
    def _get_bleeps(self):
        """
        Gets a list of bleeped words and their replacements

        @return [Bleep] List of bleeps
        """
        with self.db.session() as session:
            query = session.query(Bleep)
            bleeps = query.all()

            # This call is necessary to use the retrieved bleeps outside the
            # scope of the with
            session.expunge_all()

        return bleeps

    def cmd_bleep(self, msg):
        """
        Begin to bleep `bad_word` with `replacement`.

        `bad_word` is args[0]
        `replacement` is args[1]
        """
        args = msg.trailing.lower().strip().split(" ")
        if len(args) < 2:
            self.bot.reply(msg, "Must supply a bad word and a replacement")
            return

        bad_word = args[0]
        replacement = args[1]

        with self.db.session() as session:
            # upsert: update the replacement if the word already exists
            bleep, _ = session.get_or_create(Bleep, bad_word=bad_word)
            bleep.replacement = replacement
            session.add(bleep)

        # Invalidate the cache on _get_bleeps so that we read the new value
        self._get_bleeps.cache_clear()

        self.bot.reply(
            msg, "Will now bleep out {} with {}".format(bad_word, replacement)
        )

    def irc_privmsg(self, msg):  # pylint: disable=arguments-differ
        """Check each channel message for bad words and nag if found."""
        super().irc_privmsg(msg)

        if not msg.from_channel:
            return

        trailing = msg.trailing.lower().strip()

        # don't bleep the bleep command itself
        if trailing.startswith("{}bleep".format(self.bot.config["PREFIX"])):
            return

        words = trailing.split(" ")
        for bleep in self._get_bleeps():
            regex = re.compile(r"\b{}\b".format(bleep.bad_word))
            for word in words:
                if regex.match(word):
                    reply = random.choice(self.REPLIES)
                    self.bot.mention_reply(msg, reply.format(bleep.replacement))
| {
"repo_name": "belak/pyseabird",
"path": "seabird/modules/bleep.py",
"copies": "2",
"size": "2600",
"license": "mit",
"hash": 9129577346569968000,
"line_mean": 27.5714285714,
"line_max": 80,
"alpha_frac": 0.5753846154,
"autogenerated": false,
"ratio": 3.903903903903904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5479288519303903,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import re
import shutil
from subprocess import call, check_output, DEVNULL, CalledProcessError

from . import BaseSystem
BINARIES = [
'chromium-browser', # ubuntu/debian
'chromium', # arch
'google-chrome-stable', # arch
]
class System(BaseSystem):
    """Linux system support: locate a Chromium/Chrome binary and list displays."""

    @property
    @lru_cache()
    def browser_path(self):
        """Absolute path of the first supported browser found on PATH.

        Raises FileNotFoundError when none of BINARIES is installed.
        """
        # shutil.which performs the same PATH lookup the external `which`
        # command did, without spawning a subprocess per candidate.
        # NOTE(review): lru_cache on this property keys on `self` and keeps
        # the instance alive; behavior kept from the original.
        for binary in BINARIES:
            path = shutil.which(binary)
            if path:
                return path
        raise FileNotFoundError("No supported browsers found!")

    @property
    @lru_cache()
    def displays(self):
        """Parse `xrandr` output into a list of connected displays.

        Each entry is a dict with int keys width/height/x/y plus 'id'
        (the line index within the xrandr output).
        """
        connected = []
        for idx, line in enumerate(check_output(['xrandr']).decode('utf8').split('\n')):
            if ' connected' in line:
                matches = re.match(r".* (?P<width>[0-9]+)x(?P<height>[0-9]+)\+(?P<x>[0-9]+)\+(?P<y>[0-9]+)", line)
                display = {k: int(v) for k, v in matches.groupdict().items()}
                display['id'] = idx
                connected.append(display)
        return connected
| {
"repo_name": "foxxyz/multibrowse",
"path": "systems/linux.py",
"copies": "1",
"size": "1121",
"license": "mit",
"hash": -8443970751044435000,
"line_mean": 31.0285714286,
"line_max": 114,
"alpha_frac": 0.5637823372,
"autogenerated": false,
"ratio": 3.9059233449477353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.995571231282601,
"avg_score": 0.0027986738643448815,
"num_lines": 35
} |
from functools import lru_cache
import re
from discord.ext import commands
from ..data import fighters as _fighters
from .errors import SmashError
WORD = re.compile(r'\W+')

def find_ngrams(text: str, number: int = 3) -> set:
    """https://stackoverflow.com/a/52389482

    Build the set of character ngrams for *text*.

    Words are lower-cased, split on non-word characters, and padded with a
    space on each side before slicing.

    :param text: the string to find ngrams for
    :param number: ngram length, defaults to 3 (trigrams)
    :return: set of ngram strings
    """
    if not text:
        return set()
    padded = (' %s ' % token for token in WORD.split(text.lower()) if token.strip())
    result = set()
    for word in padded:
        result.update(word[i:i + number] for i in range(len(word) - number + 1))
    return result
def compare_ngrams(ngrams1, ngrams2):
    """https://stackoverflow.com/a/52389482

    Count the ngrams shared by both sets.

    The referenced answer returned a 0..1 ratio; this variant instead
    returns the raw number of common ngrams.
    """
    shared = ngrams1.intersection(ngrams2)
    return len(shared)
class Fighter(commands.Converter):
    """discord.py converter resolving free-text input to a registered fighter.

    Instances are created only through Fighter.add(); the class keeps a
    module-wide registry in __fighters (name-mangled to _Fighter__fighters).
    """
    __fighters = []

    async def convert(self, ctx, arg):
        # Converter entry point: delegate to the memoized fuzzy lookup.
        return self.get_closest(arg)

    @classmethod
    def add(cls, name, color, aliases=()):
        """Register a fighter with its display name, embed color and aliases."""
        self = cls()
        self.name = name
        self.color = color
        self.aliases = aliases
        self.replace_on_insert = False
        # Precompute the trigram set over name + all aliases for matching.
        self.__ngrams = frozenset(find_ngrams(name).union(*(find_ngrams(alias) for alias in aliases)))
        cls.__fighters.append(self)

    @classmethod
    def all(cls):
        """Iterate over every registered fighter."""
        return iter(cls.__fighters)

    @classmethod
    @lru_cache()
    def get_closest(cls, name):
        """Return the registered fighter most similar to *name* (memoized).

        Raises SmashError when no fighter shares a single trigram with it.
        """
        ngrams = find_ngrams(name)
        similarities = {fighter: compare_ngrams(fighter.__ngrams, ngrams) for fighter in cls.all()}
        # Ascending sort by (similarity, name length): among the top-scoring
        # fighters, the shortest name is selected by next(filtered) below.
        sorted_sims = sorted(similarities.items(), key=lambda pair: (pair[1], len(pair[0].name)))
        highest = max(pair[1] for pair in sorted_sims)
        if highest == 0:
            raise SmashError(f'{name} is not a valid fighter.')
        filtered = filter(lambda pair: pair[1] == highest, sorted_sims)
        most_similar = next(filtered)
        return most_similar[0]

    def __str__(self):
        return self.name
class _FakeFighter:
    """Placeholder 'fighter' for table slots that hold no real fighter.

    A single callable instance (FakeFighter, created at module bottom) is
    exported; calling it with an allowed label returns the pre-built
    singleton for that label.
    """
    # Allowed placeholder labels -> whether they replace on insert.
    ALLOWED = {'-': True, '???': False}
    __instances = {}  # hack to only ever have 1 + len(ALLOWED) instances

    @classmethod
    def populate(cls):
        """Build the singleton instance for each allowed label."""
        for val, replace in cls.ALLOWED.items():
            self = cls()
            self.name = val
            self.color = 0xfffffe  # near-white embed color
            self.replace_on_insert = replace
            cls.__instances[val] = self

    @property
    def names(self):
        """The allowed placeholder labels."""
        return self.__instances.keys()

    def __instancecheck__(self, instance):  # allows instance to act as class in `isinstance`
        return isinstance(instance, self.__class__)

    def __call__(self, val):
        """Return the singleton for *val*; raise ValueError if not allowed."""
        if val not in self.ALLOWED:
            raise ValueError(f'Argument must be one of ({", ".join(self.ALLOWED)})')
        return self.__instances[val]

    def __str__(self):
        return self.name
# Build the placeholder singletons, then register every real fighter
# from the static data table.
FakeFighter = _FakeFighter()
FakeFighter.populate()

for fighter_data in _fighters:
    Fighter.add(*fighter_data)
| {
"repo_name": "mikevb1/discordbot",
"path": "cogs/smash/models/fighter.py",
"copies": "1",
"size": "3267",
"license": "mit",
"hash": -4853336788604306000,
"line_mean": 28.1696428571,
"line_max": 102,
"alpha_frac": 0.6192225283,
"autogenerated": false,
"ratio": 3.594059405940594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710946406416869,
"avg_score": 0.00046710556474485797,
"num_lines": 112
} |
from functools import lru_cache
import requests
from urllib.parse import quote_plus
import config
_headers = {"user-agent": "/u/gracefulcharity card bot",
"X-Mashape-Authorization": config.MASHAPE_API_KEY}
BASE_URI = 'https://yugiohprices.p.mashape.com'
@lru_cache(maxsize=128)
def get_card_data(card_name):
    """Fetch card data from the YugiohPrices API (memoized per card name).

    :param card_name: name of the card to look up
    :return: the ``data`` payload of a successful API response
    :raises YPGApiFail: on a network error, a non-OK HTTP status, or an
        unsuccessful API ``status`` field
    """
    end_point = '/card_data/'
    # Uses the module-level _headers; the previous local copy shadowed it
    # with identical content and has been removed.
    try:
        response = requests.get(BASE_URI + end_point + quote_plus(card_name),
                                headers=_headers)
    except requests.exceptions.RequestException:
        raise YPGApiFail()
    else:
        response.connection.close()
        if response.ok:
            json = response.json()
            if json.get('status', '') == 'success':
                return json['data']
        raise YPGApiFail()
class YPGApiFail(Exception):
    """Raised when the YugiohPrices API request fails or returns an error."""
    pass
| {
"repo_name": "burkean/gracefulcharitybot",
"path": "gracefulcharity/yugiohprices.py",
"copies": "1",
"size": "1027",
"license": "mit",
"hash": -4387691744370818600,
"line_mean": 26.0263157895,
"line_max": 77,
"alpha_frac": 0.5959104187,
"autogenerated": false,
"ratio": 3.694244604316547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9790155023016547,
"avg_score": 0,
"num_lines": 38
} |
from functools import lru_cache
import requests, re, isodate
from cache import TimedObjCache
import config
# YouTube utilities
_yt_sigs = ["youtube.com", "youtu.be"]
_yt_headers = {"User-Agent": config.useragent}
_yt_api_base = "https://www.googleapis.com/youtube/v3/"
_yt_video_url = _yt_api_base+"videos?part={type}&id={id}"
_yt_playlist_url = _yt_api_base+"playlists?part={type}&id={id}"
_yt_comments_url = _yt_api_base+"commentThreads?part={type}&textFormat=plainText&videoId={id}"
_yt_last_time = 0
_yt_cache = TimedObjCache(expiration=1800) # 30 min
_yt_video_pattern = re.compile("(?:youtube\.com/(?:(?:watch|attribution_link)\?(?:.*(?:&|%3F|&))?v(?:=|%3D)|embed/|v/)|youtu\.be/)([a-zA-Z0-9-_]{11})")
_yt_playlist_pattern = re.compile("youtube\.com/playlist\?list=([a-zA-Z0-9-_]+)")
_yt_channel_pattern = re.compile("youtube\.com/(?:#/)?(?:channel|user)/([a-zA-Z0-9-_]+)")
def is_youtube_link(url):
    """Return True if *url* contains a known YouTube hostname signature."""
    url = url.lower()
    return any(sig in url for sig in _yt_sigs)

def is_youtube_video(url):
    """Return True if *url* points at a single YouTube video."""
    if not is_youtube_link(url):
        return False
    # `x is not None` replaces the old `not x is None` anti-idiom.
    return _get_youtube_video_id(url) is not None

def _get_youtube_video_id(url):
    """Extract the 11-character video id from *url*, or None."""
    match = _yt_video_pattern.findall(url)
    if match:
        return match[0]
    return None

def is_youtube_playlist(url):
    """Return True if *url* points at a YouTube playlist."""
    if not is_youtube_link(url):
        return False
    return _get_youtube_playlist_id(url) is not None

def _get_youtube_playlist_id(url):
    """Extract the playlist id from *url*, or None."""
    match = _yt_playlist_pattern.findall(url)
    if match:
        return match[0]
    return None
## Getting channel information

def get_youtube_channel(url):
    """Resolve *url* to a (channel_id, channel_name) tuple.

    Direct channel/user URLs yield (id, None); video and playlist URLs are
    resolved through the API. Returns None when nothing can be resolved.
    """
    match = _yt_channel_pattern.findall(url)
    if match:
        return match[0], None
    ytid = _get_youtube_video_id(url)
    if ytid is not None:
        return _get_channel_from_video(ytid)
    ytid = _get_youtube_playlist_id(url)
    if ytid is not None:
        return _get_channel_from_playlist(ytid)
    return None

def _extract_channel(response, kind):
    """Pull (channelId, channelTitle) from an API *response* of the given
    resource *kind* ('youtube#video' / 'youtube#playlist'), or None."""
    if response is None or len(response["items"]) == 0:
        return None
    info = response["items"][0]
    if info["kind"] == kind and "snippet" in info:  # Sanity check
        snippet = info["snippet"]
        return snippet["channelId"], snippet["channelTitle"]
    return None

@lru_cache()
def _get_channel_from_video(video_id):
    """Look up the owning channel of a video (memoized)."""
    url = _yt_video_url.format(type="snippet", id=video_id)
    return _extract_channel(_youtube_request(url), "youtube#video")

@lru_cache()
def _get_channel_from_playlist(playlist_id):
    """Look up the owning channel of a playlist (memoized)."""
    url = _yt_playlist_url.format(type="snippet", id=playlist_id)
    return _extract_channel(_youtube_request(url), "youtube#playlist")
## Getting video information

def get_youtube_video_description(url):
    """Return the description text of the video at *url*, or None."""
    video_id = _get_youtube_video_id(url)
    if video_id is None:
        return None
    req = _yt_video_url.format(type="snippet", id=video_id)
    response = _youtube_request(req)
    if response is None or len(response["items"]) == 0:
        return None
    video_info = response["items"][0]
    if video_info["kind"] == "youtube#video" and "snippet" in video_info:  # Sanity check
        return video_info["snippet"]["description"]
    return None

def get_youtube_video_duration(url):
    """Return the video duration in seconds (float), or None."""
    video_id = _get_youtube_video_id(url)
    if video_id is None:
        return None
    req = _yt_video_url.format(type="contentDetails", id=video_id)
    response = _youtube_request(req)
    if response is None or len(response["items"]) == 0:
        return None
    video_info = response["items"][0]
    if video_info["kind"] == "youtube#video" and "contentDetails" in video_info:  # Sanity check
        iso_duration = video_info["contentDetails"]["duration"]
        return isodate.parse_duration(iso_duration).total_seconds()
    return None

def get_youtube_comments(url):
    """Return a list of top-level comment texts for the video, or None."""
    video_id = _get_youtube_video_id(url)
    if video_id is None:
        return None
    req = _yt_comments_url.format(type="snippet", id=video_id)
    response = _youtube_request(req)
    if response is None or len(response["items"]) == 0:
        return None
    texts = []
    for comment_thread in response["items"]:
        comment = comment_thread["snippet"]["topLevelComment"]["snippet"]
        text = comment["textDisplay"]
        # Strip the zero-width no-break space YouTube sometimes appends.
        if text.endswith("\ufeff"):
            text = text[:-1]
        texts.append(text)
    return texts
def _youtube_request(request_url):
    """Perform a throttled, cached GET against the YouTube API.

    Returns the decoded JSON payload, or None on a non-200 response.
    Successful responses are cached for 30 min in _yt_cache; the cache key
    is the URL *without* the API key (the key is appended afterwards).
    """
    global _yt_last_time
    cache_result = _yt_cache.get(request_url)
    if cache_result is not None:
        return cache_result
    url = request_url+"&key="+config.youtube_api_key
    # Throttle to at most one request every 0.25 s across all callers.
    _yt_last_time = _requst_wait(_yt_last_time, 0.25)
    response = requests.get(url, headers=_yt_headers)
    if response.status_code == 200:
        #print("Success!")
        good_stuff = response.json()
        _yt_cache.store(request_url, good_stuff)
        return good_stuff
    else:
        print("YouTube request failed ({}): {}".format(response.status_code, url))
        return None
# Misc. helpers
from time import time, sleep
def _requst_wait(last_time, delay):
time_since = time() - last_time
if 0 < time_since < delay:
sleep(delay - time_since)
return time()
| {
"repo_name": "TheEnigmaBlade/SpamShark",
"path": "media_util.py",
"copies": "1",
"size": "5284",
"license": "mit",
"hash": 5188566427218651000,
"line_mean": 28.1933701657,
"line_max": 155,
"alpha_frac": 0.6905753217,
"autogenerated": false,
"ratio": 2.8937568455640745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4084332167264074,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import time
from sauna.plugins.base import PsutilPlugin
from sauna.plugins import human_to_bytes, bytes_to_human, PluginRegister
my_plugin = PluginRegister('Network')
@my_plugin.plugin()
class Network(PsutilPlugin):
    """Sauna plugin: network throughput checks (bytes/s and packets/s) via psutil."""

    def __init__(self, config):
        super().__init__(config)

    @my_plugin.check()
    def upload_data_speed(self, check_config):
        """Status + message for the upload byte rate on check_config['interface']."""
        ul, _, _, _ = self.get_network_data(
            interface=check_config['interface'])
        ul = round(ul, 2)
        return (
            self._value_to_status_less(ul, check_config, human_to_bytes),
            'Upload speed: {}/s'.format(bytes_to_human(ul))
        )

    @my_plugin.check()
    def download_data_speed(self, check_config):
        """Status + message for the download byte rate."""
        _, dl, _, _ = self.get_network_data(
            interface=check_config['interface'])
        dl = round(dl, 2)
        return (
            self._value_to_status_less(dl, check_config, human_to_bytes),
            'Download speed: {}/s'.format(bytes_to_human(dl))
        )

    @my_plugin.check()
    def upload_packet_speed(self, check_config):
        """Status + message for the upload packet rate."""
        _, _, ul, _ = self.get_network_data(
            interface=check_config['interface'])
        ul = round(ul, 2)
        return (
            self._value_to_status_less(ul, check_config),
            'Upload : {} p/s'.format(ul)
        )

    @my_plugin.check()
    def download_packet_speed(self, check_config):
        """Status + message for the download packet rate."""
        _, _, _, dl = self.get_network_data(
            interface=check_config['interface'])
        dl = round(dl, 2)
        return (
            self._value_to_status_less(dl, check_config),
            'Download : {} p/s'.format(dl)
        )

    # NOTE(review): lru_cache means the measurement below is taken once per
    # (self, interface, delay) and reused forever after — the four checks
    # above share one sample, but the value never refreshes for the lifetime
    # of this instance. Confirm instances are recreated per check cycle.
    @lru_cache()
    def get_network_data(self, interface='eth0', delay=1):
        """Sample io counters twice, *delay* seconds apart, and return
        (bytes_up/s, bytes_down/s, packets_up/s, packets_down/s)."""
        t0 = time.time()
        counter = self.psutil.net_io_counters(pernic=True)[interface]
        first_values = (counter.bytes_sent, counter.bytes_recv,
                        counter.packets_sent, counter.packets_recv)
        time.sleep(delay)
        counter = self.psutil.net_io_counters(pernic=True)[interface]
        t1 = time.time()
        last_values = (counter.bytes_sent, counter.bytes_recv,
                       counter.packets_sent, counter.packets_recv)
        kb_ul, kb_dl, p_ul, p_dl = [
            (last - first) / (t1 - t0)
            for last, first in zip(last_values, first_values)
        ]
        return kb_ul, kb_dl, p_ul, p_dl

    @staticmethod
    def config_sample():
        """Example YAML configuration for this plugin."""
        return '''
  - type: Network
    checks:
      - type: upload_data_speed
        interface: em1
        # Crit if download > 2MB/s
        warn: 500K
        crit: 2M
      - type: download_data_speed
        interface: em1
        # Warn if upload > 500KB/s
        warn: 500K
        crit: 2M
      - type: upload_packet_speed
        interface: em1
        # Values are in packet/s
        warn: 500
        crit: 2000
      - type: download_packet_speed
        interface: em1
        # Values are in packet/s
        warn: 500
        crit: 2000
'''
| {
"repo_name": "NicolasLM/sauna",
"path": "sauna/plugins/ext/network.py",
"copies": "1",
"size": "3029",
"license": "bsd-2-clause",
"hash": -3209681635300051500,
"line_mean": 28.9900990099,
"line_max": 73,
"alpha_frac": 0.5546384946,
"autogenerated": false,
"ratio": 3.5803782505910164,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4635016745191016,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from aiohttp_session import get_session
from wtforms import Form, PasswordField
from wtforms.fields.html5 import EmailField
from wtforms.validators import Required, EqualTo, Length, Email
from wtforms.csrf.session import SessionCSRF
from .cfg import cfg
from .utils import is_confirmation_expired
def get(name):
    """Return the form class registered under *name* in the lazy registry."""
    forms = create()
    return forms[name]
@lru_cache()
def create():
    """Build (once) and return the module's form classes keyed by name.

    Settings from ``cfg`` are not available at import time, so the classes
    are constructed lazily on first call; lru_cache makes this a one-shot
    initializer. The return value is this function's ``locals()`` dict,
    which ``get()`` indexes by class name.
    """
    # We don't have real settings on import stage, so we need to defer
    # initialization of forms

    class BaseForm(Form):
        """Common base: session-backed CSRF protection + whitespace stripping."""

        @classmethod
        async def init(cls, request, *args, **kwargs):
            # Bind the aiohttp session as CSRF context and feed POST data.
            session = await get_session(request)
            kwargs.setdefault('meta', {})['csrf_context'] = session
            return cls(await request.post(), *args, **kwargs)

        class Meta:
            csrf = True
            csrf_class = SessionCSRF
            csrf_secret = cfg.CSRF_SECRET.encode('utf-8')
            csrf_time_limit = None  # tokens never expire

            def bind_field(self, form, unbound_field, options):
                # auto add strip_filter
                filters = unbound_field.kwargs.get('filters', [])
                filters.append(strip_filter)
                return unbound_field.bind(
                    form=form, filters=filters, **options)

        def validate(self):
            result = super().validate()
            # Surface a CSRF failure on the first visible field so templates
            # that only render field errors still show it.
            if 'csrf_token' in self.errors:
                for field in self:
                    field.errors.append(self.errors['csrf_token'][0])
                    break
            return result

    def strip_filter(value):
        # Strip surrounding whitespace from any string-like input value.
        if value is not None and hasattr(value, 'strip'):
            return value.strip()
        return value

    class Registration(BaseForm):
        email = EmailField('Email', [
            Required(),
            Email(),
        ])
        password = PasswordField('Password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
            EqualTo('confirm', message=cfg.MSG_PASSWORDS_NOT_MATCH),
        ])
        confirm = PasswordField('Repeat password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
        ])

        # NOTE(review): async override of wtforms' sync validate();
        # callers must await this.
        async def validate(self):
            db = cfg.STORAGE
            if not super().validate():
                return False
            user = await db.get_user({'email': self.email.data})
            if not user:
                return True
            # An unconfirmed account whose confirmation expired is purged,
            # freeing the address for re-registration.
            if user['status'] == 'confirmation':
                confirmation = await db.get_confirmation(
                    {'user': user, 'action': 'registration'})
                if is_confirmation_expired(confirmation):
                    await db.delete_confirmation(confirmation)
                    await db.delete_user(user)
                    return True
            self.email.errors.append(cfg.MSG_EMAIL_EXISTS)
            return False

    class Login(BaseForm):
        email = EmailField('Email', [
            Required(),
            Email(),
        ])
        password = PasswordField('Password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
        ])

    class ResetPasswordRequest(BaseForm):
        email = EmailField('Email', [
            Required(),
            Email(),
        ])

    class ResetPassword(BaseForm):
        password = PasswordField('New password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
            EqualTo('confirm', message=cfg.MSG_PASSWORDS_NOT_MATCH),
        ])
        confirm = PasswordField('Repeat password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
        ])

    class ChangeEmail(BaseForm):
        email = EmailField('New email', [Email()])

        # NOTE(review): signature differs from the base validate() —
        # callers pass the current email to reject no-op changes.
        def validate(self, cur_email):
            return super().validate() and self.email.data != cur_email

    class ChangePassword(BaseForm):
        cur_password = PasswordField('Current password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
        ])
        new_password = PasswordField('New password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
            EqualTo('confirm', message=cfg.MSG_PASSWORDS_NOT_MATCH),
        ])
        confirm = PasswordField('Repeat new password', [
            Required(),
            Length(*cfg.PASSWORD_LEN),
        ])

    # Expose every class defined above as the registry consumed by get().
    return locals()
| {
"repo_name": "imbolc/aiohttp-login",
"path": "aiohttp_login/forms.py",
"copies": "1",
"size": "4297",
"license": "isc",
"hash": 2105188118332951000,
"line_mean": 29.9136690647,
"line_max": 70,
"alpha_frac": 0.5466604608,
"autogenerated": false,
"ratio": 4.706462212486309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5753122673286309,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from aiohttp.web_exceptions import HTTPNotFound
from .models import Departamentos
import chilero.web
@lru_cache()
def departamentos():
    """Load the Departamentos catalog once and memoize it."""
    catalog = Departamentos.load()
    return catalog
@lru_cache()
def search(keywords):
    """Return municipio choices whose label contains at least one keyword.

    *keywords* must be hashable (a tuple) because results are memoized.
    Matching is case-insensitive substring containment.
    """
    return [
        choice for choice in departamentos().as_complete_choices()
        if any(kw.lower() in choice[1].lower() for kw in keywords)
    ]
class Municipios(chilero.web.Resource):
    """Read-only web resource exposing municipio choices, with search."""

    resource_name = 'municipios'

    def collection_options(self, **kwargs):
        """CORS preflight response for the collection endpoint."""
        return chilero.web.Response(headers=[
            ['Access-Control-Allow-Methods', 'GET, POST, OPTIONS'],
            ['Access-Control-Allow-Origin', '*'],
            ["Access-Control-Allow-Headers",
             "Cookie, Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"]
        ])

    def entity_options(self, **kwargs):
        """CORS preflight response for a single entity."""
        return chilero.web.Response(headers=[
            ['Access-Control-Allow-Methods', 'GET, PATCH'],
            ['Access-Control-Allow-Origin', '*'],
            ["Access-Control-Allow-Headers",
             "Cookie, Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"]
        ])

    def index(self):
        """List all municipios, optionally filtered by the ?search= keywords."""
        search_terms = self.request.GET.get('search')
        if search_terms:
            # search() is memoized, so keywords are passed as a hashable tuple.
            terms = search_terms.lower().split()
            items = search(tuple(terms))
        else:
            items = departamentos().as_complete_choices()
        response = dict(
            self=self.get_self_url(),
            data=dict(
                offset=0,
                limit=0,
                next=None,
                prev=None,
                count=len(items),
                length=len(items)
            ),
            index=[self.serialize_object(i) for i in items]
        )
        return self.response(response)

    def serialize_object(self, obj):
        """Serialize an (id, nombre) pair into the API's dict shape."""
        return dict(
            id=obj[0],
            nombre=obj[1],
            url=self.get_object_url(obj[0]),
            _label=obj[1]
        )

    def show(self, id):
        """Return one municipio by id; 404 when unknown."""
        try:
            m = dict(departamentos().as_complete_choices())[id]
        except KeyError:
            raise HTTPNotFound()
        return self.response(self.serialize_object([id, m]))
| {
"repo_name": "guate/division-politica",
"path": "guate/division_politica/resources.py",
"copies": "1",
"size": "2335",
"license": "mit",
"hash": 2446156780124314000,
"line_mean": 27.4756097561,
"line_max": 99,
"alpha_frac": 0.5498929336,
"autogenerated": false,
"ratio": 4.025862068965517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5075755002565516,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from .concurrency import ConcurrentShelf
class AddressDictionary():
    """Persistent bidirectional mapping between addresses and short ids.

    Backed by a ConcurrentShelf; both lookup directions are additionally
    memoized in-process with lru_cache.
    """

    def __init__(self, file_name):
        self._file_name = file_name
        self._closed = False
        # (Removed a dead `self._shelf = None` that was immediately
        # overwritten by the real assignment below.)
        self._shelf = ConcurrentShelf(file_name)
        self._shelf.lock()
        if '__last_id' not in self._shelf:
            self._shelf['__last_id'] = 0
        self._shelf.unlock()

    # NOTE(review): lru_cache on a bound method keys on `self` and keeps the
    # instance alive for the cache's lifetime; kept from the original.
    @lru_cache(maxsize=4096)
    def address_to_id(self, address):
        """Return the short id ('__A<n>') for *address*, allocating one if new.

        NOTE(review): the existence check happens before the lock is taken,
        so two processes could race to allocate — confirm acceptable.
        """
        address_key = '__address__' + address
        if address_key in self._shelf:
            return self._shelf[address_key]
        self._shelf.lock()
        new_id = self._shelf['__last_id'] + 1
        self._shelf['__last_id'] = new_id
        new_id = '__A{}'.format(new_id)
        # Store both directions of the mapping.
        self._shelf[address_key] = new_id
        self._shelf['__id__' + new_id] = address
        self._shelf.unlock()
        return new_id

    @lru_cache(maxsize=4096)
    def id_to_address(self, id):
        """Return the original address for short id *id* (KeyError if absent)."""
        return self._shelf['__id__' + id]
| {
"repo_name": "probprog/pyprob",
"path": "pyprob/address_dictionary.py",
"copies": "1",
"size": "1116",
"license": "bsd-2-clause",
"hash": -6442568795722627000,
"line_mean": 30,
"line_max": 49,
"alpha_frac": 0.5304659498,
"autogenerated": false,
"ratio": 3.509433962264151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45398999120641514,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from conditional import ldap
def _ldap_get_group_members(group):
    """Return the member list of the named LDAP group."""
    return ldap.get_group(group).get_members()

def _ldap_is_member_of_group(member, group):
    """True if *member*'s memberOf DNs include the named group.

    Compares only the CN component of each DN's first RDN.
    """
    group_list = member.get("memberOf")
    for group_dn in group_list:
        # group_dn looks like 'cn=<name>,ou=...'; [3:] strips the 'cn='.
        if group == group_dn.split(",")[0][3:]:
            return True
    return False

def _ldap_add_member_to_group(account, group):
    """Add *account* to *group* unless it is already a member."""
    if not _ldap_is_member_of_group(account, group):
        ldap.get_group(group).add_member(account, dn=False)

def _ldap_remove_member_from_group(account, group):
    """Remove *account* from *group* if it is currently a member."""
    if _ldap_is_member_of_group(account, group):
        ldap.get_group(group).del_member(account, dn=False)
@lru_cache(maxsize=1024)
def _ldap_is_member_of_directorship(account, directorship):
    """True if *account* is one of the heads of *directorship* (memoized).

    Previously fell through and returned None when no head matched; now
    returns False explicitly (truthiness for existing callers unchanged).
    """
    directors = ldap.get_directorship_heads(directorship)
    return any(director.uid == account.uid for director in directors)
@lru_cache(maxsize=1024)
def ldap_get_member(username):
    """Look up a member by uid (memoized; cleared by the setters below)."""
    return ldap.get_member(username, uid=True)

@lru_cache(maxsize=1024)
def ldap_get_active_members():
    """All members of the 'active' group (memoized)."""
    return _ldap_get_group_members("active")

@lru_cache(maxsize=1024)
def ldap_get_intro_members():
    """All members of the 'intromembers' group (memoized)."""
    return _ldap_get_group_members("intromembers")

@lru_cache(maxsize=1024)
def ldap_get_onfloor_members():
    """All members of the 'onfloor' group (memoized)."""
    return _ldap_get_group_members("onfloor")

@lru_cache(maxsize=1024)
def ldap_get_current_students():
    """All members of the 'current_student' group (memoized)."""
    return _ldap_get_group_members("current_student")
def ldap_is_active(account):
    """True if *account* is in the 'active' group."""
    return _ldap_is_member_of_group(account, 'active')

def ldap_is_alumni(account):
    # If the user is not active, they are an alumni.
    return not _ldap_is_member_of_group(account, 'active')

def ldap_is_eboard(account):
    """True if *account* is in the 'eboard' group."""
    return _ldap_is_member_of_group(account, 'eboard')

def ldap_is_rtp(account):
    """True if *account* is in the 'rtp' group."""
    return _ldap_is_member_of_group(account, 'rtp')

def ldap_is_intromember(account):
    """True if *account* is in the 'intromembers' group."""
    return _ldap_is_member_of_group(account, 'intromembers')

def ldap_is_onfloor(account):
    """True if *account* is in the 'onfloor' group."""
    return _ldap_is_member_of_group(account, 'onfloor')

def ldap_is_financial_director(account):
    """True if *account* heads the Financial directorship."""
    return _ldap_is_member_of_directorship(account, 'Financial')

def ldap_is_eval_director(account):
    """True if *account* heads the Evaluations directorship."""
    return _ldap_is_member_of_directorship(account, 'Evaluations')

def ldap_is_current_student(account):
    """True if *account* is in the 'current_student' group."""
    return _ldap_is_member_of_group(account, 'current_student')
def ldap_set_housingpoints(account, housing_points):
    """Set the account's housing points and invalidate affected caches."""
    account.housingPoints = housing_points
    ldap_get_current_students.cache_clear()
    ldap_get_member.cache_clear()

def ldap_set_roomnumber(account, room_number):
    """Set (or clear, with '') the account's room number; invalidate caches."""
    if room_number == "":
        room_number = None
    account.roomNumber = room_number
    ldap_get_current_students.cache_clear()
    ldap_get_member.cache_clear()

def ldap_set_active(account):
    """Add the account to 'active' and invalidate affected caches."""
    _ldap_add_member_to_group(account, 'active')
    ldap_get_active_members.cache_clear()
    ldap_get_member.cache_clear()

def ldap_set_inactive(account):
    """Remove the account from 'active' and invalidate affected caches."""
    _ldap_remove_member_from_group(account, 'active')
    ldap_get_active_members.cache_clear()
    ldap_get_member.cache_clear()

def ldap_set_current_student(account):
    """Add the account to 'current_student' and invalidate affected caches."""
    _ldap_add_member_to_group(account, 'current_student')
    ldap_get_current_students.cache_clear()
    ldap_get_member.cache_clear()

def ldap_set_non_current_student(account):
    """Remove the account from 'current_student'; invalidate affected caches."""
    _ldap_remove_member_from_group(account, 'current_student')
    ldap_get_current_students.cache_clear()
    ldap_get_member.cache_clear()
def ldap_get_roomnumber(account):
    """Return the account's room number, or '' when the attribute is absent."""
    return getattr(account, "roomNumber", "")
| {
"repo_name": "RamZallan/conditional",
"path": "conditional/util/ldap.py",
"copies": "1",
"size": "3553",
"license": "mit",
"hash": -5255855859427642000,
"line_mean": 24.3785714286,
"line_max": 66,
"alpha_frac": 0.7016605685,
"autogenerated": false,
"ratio": 3.163846838824577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9365507407324576,
"avg_score": 0,
"num_lines": 140
} |
from functools import lru_cache
from custom import rewards
from stats.models import Award, Reward
_awards_tour = Award.objects.filter(type='tour')
_awards_mission = Award.objects.filter(type='mission')
_awards_sortie = Award.objects.filter(type='sortie')
@lru_cache(maxsize=32)
def get_reward_func(func_name):
    """Resolve an award-check function by name from custom.rewards (memoized)."""
    return getattr(rewards, func_name)

@lru_cache(maxsize=512)
def rewarding(award_id, player_id):
    """Grant an award to a player once; memoized to skip repeat DB round-trips."""
    return Reward.objects.get_or_create(award_id=award_id, player_id=player_id)
def reward_tour(player):
    """Evaluate and grant every tour-scoped award for *player*."""
    for award in _awards_tour:
        if get_reward_func(award.func)(player=player):
            rewarding(award_id=award.id, player_id=player.id)

def reward_mission(player_mission):
    """Evaluate and grant every mission-scoped award for the mission's player."""
    player = player_mission.player
    for award in _awards_mission:
        if get_reward_func(award.func)(player_mission=player_mission):
            rewarding(award_id=award.id, player_id=player.id)

def reward_sortie(sortie):
    """Evaluate and grant every sortie-scoped award for the sortie's player."""
    player = sortie.player
    for award in _awards_sortie:
        if get_reward_func(award.func)(sortie=sortie):
            rewarding(award_id=award.id, player_id=player.id)
| {
"repo_name": "Flyingfox646/flyingfox",
"path": "src/stats/rewards.py",
"copies": "1",
"size": "1117",
"license": "mit",
"hash": 1857266294850504700,
"line_mean": 27.641025641,
"line_max": 79,
"alpha_frac": 0.7072515667,
"autogenerated": false,
"ratio": 3.01078167115903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42180332378590296,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from deuce import conf
import deuce
import importlib
from deuce.drivers.metadatadriver import MetadataStorageDriver,\
OverlapError, GapError, ConstraintError
# SQL schemas. Note: the schema is versions
# in such a way that new instances always start
# with user version 1, then proceeed to upgrade
# through each version until we get to the latest.
schemas = list()
schemas.append([
"""
CREATE TABLE files
(
projectid TEXT NOT NULL,
vaultid TEXT NOT NULL,
fileid TEXT NOT NULL,
finalized INTEGER NOT NULL DEFAULT 0,
size INTEGER DEFAULT 0,
PRIMARY KEY(projectid, vaultid, fileid)
)
""",
"""
CREATE TABLE fileblocks
(
projectid TEXT NOT NULL,
vaultid TEXT NOT NULL,
fileid TEXT NOT NULL,
blockid TEXT NOT NULL,
offset INTEGER NOT NULL,
UNIQUE (projectid, vaultid, fileid, blockid, offset)
)
""",
"""
CREATE TABLE blocks
(
projectid TEXT NOT NULL,
vaultid TEXT NOT NULL,
blockid TEXT NOT NULL,
storageid TEXT NOT NULL,
size INTEGER NOT NULL,
reftime DATETIME NOT NULL,
isinvalid BOOLEAN NOT NULL DEFAULT 0,
PRIMARY KEY(projectid, vaultid, blockid)
)
""",
"""
CREATE TABLE vaults
(
projectid TEXT NOT NULL,
vaultid TEXT NOT NULL,
PRIMARY KEY(projectid, vaultid)
)
"""
]) # Version 1
CURRENT_DB_VERSION = len(schemas)
SQL_CREATE_VAULT = '''
INSERT OR REPLACE INTO vaults
(projectid, vaultid)
VALUES (:projectid, :vaultid)
'''
SQL_DELETE_VAULT = '''
DELETE FROM vaults
where projectid=:projectid
AND vaultid=:vaultid
'''
SQL_GET_ALL_VAULT = '''
SELECT vaultid
FROM vaults
WHERE projectid = :projectid
AND vaultid >= :marker
ORDER BY vaultid
LIMIT :limit
'''
SQL_CREATE_FILE = '''
INSERT INTO files (projectid, vaultid, fileid)
VALUES (:projectid, :vaultid, :fileid)
'''
SQL_GET_BLOCK = '''
SELECT size
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid = :blockid
AND isinvalid = 0
'''
SQL_GET_FILE = '''
SELECT finalized
FROM files
WHERE projectid = :projectid
AND vaultid = :vaultid
AND fileid = :fileid
'''
SQL_GET_FILE_SIZE = '''
SELECT size
FROM files
WHERE projectid = :projectid
AND vaultid = :vaultid
AND fileid = :fileid
'''
SQL_DELETE_FILE = '''
DELETE FROM files
where projectid=:projectid
AND vaultid=:vaultid
AND fileid=:fileid
'''
SQL_GET_ALL_FILE_BLOCKS = '''
SELECT blockid, offset
FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND fileid = :fileid
ORDER BY offset
'''
SQL_GET_BAD_BLOCKS = '''
SELECT blockid
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND isinvalid = 1
'''
SQL_GET_FILE_PER_BLOCK = '''
SELECT fileid
FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid = :blockid
'''
SQL_UPDATE_REF_TIME_BLOCKS_IN_FILE = '''
UPDATE blocks
SET reftime = strftime('%s', 'now')
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid IN (SELECT blockid
FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND fileid = :fileid)
'''
SQL_GET_COUNT_ALL_FILE_BLOCKS = '''
SELECT COUNT(DISTINCT(blockid))
FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
'''
SQL_GET_FILE_BLOCKS = '''
SELECT blockid, offset
FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND fileid = :fileid
AND offset >= :offset
ORDER BY offset
LIMIT :limit
'''
SQL_DELETE_FILE_BLOCKS_FOR_FILE = '''
DELETE FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND fileid = :fileid
'''
SQL_GET_ALL_BLOCKS = '''
SELECT blockid
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid >= :marker
order by blockid
LIMIT :limit
'''
SQL_GET_STORAGE_ID = '''
SELECT storageid
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid = :blockid
'''
SQL_GET_BLOCK_ID = '''
SELECT blockid
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND storageid = :storageid
'''
SQL_GET_COUNT_ALL_BLOCKS = '''
SELECT COUNT(DISTINCT(blockid))
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
'''
SQL_GET_ALL_FILES = '''
SELECT fileid
FROM files
WHERE projectid=:projectid
AND vaultid = :vaultid
AND fileid >= :marker
AND finalized = :finalized
order by fileid
LIMIT :limit
'''
SQL_GET_COUNT_ALL_FILES = '''
SELECT COUNT(DISTINCT(fileid))
FROM files
WHERE projectid = :projectid
AND vaultid = :vaultid
'''
SQL_CREATE_FILEBLOCK_LIST = '''
SELECT blocks.blockid, fileblocks.offset, blocks.size
FROM blocks, fileblocks
WHERE fileblocks.blockid = blocks.blockid
AND fileblocks.vaultid = blocks.vaultid
AND fileblocks.projectid = :projectid
AND fileblocks.vaultid = :vaultid
AND fileblocks.fileid = :fileid
AND blocks.isinvalid = 0
ORDER by offset
'''
SQL_FINALIZE_FILE = '''
UPDATE files
SET finalized=1, size=:file_size
WHERE projectid=:projectid
AND fileid=:fileid
AND vaultid=:vaultid
'''
SQL_ASSIGN_BLOCK_TO_FILE = '''
INSERT OR REPLACE INTO fileblocks
(projectid, vaultid, fileid, blockid, offset)
VALUES (:projectid, :vaultid, :fileid, :blockid, :offset)
'''
SQL_REGISTER_BLOCK = '''
INSERT OR REPLACE INTO blocks
(projectid, vaultid, blockid, storageid, size, reftime, isinvalid)
VALUES (:projectid, :vaultid, :blockid, :storageid, :blocksize,
strftime('%s', 'now'), 0)
'''
SQL_UNREGISTER_BLOCK = '''
DELETE FROM blocks
WHERE projectid=:projectid AND blockid=:blockid
'''
SQL_MARK_BLOCK_AS_BAD = '''
UPDATE blocks SET
isinvalid = 1 WHERE
projectid = :projectid AND
vaultid = :vaultid AND
blockid = :blockid
'''
SQL_MARK_BLOCK_AS_GOOD = '''
UPDATE blocks SET
isinvalid = 0 WHERE
projectid = :projectid AND
vaultid = :vaultid AND
blockid = :blockid
'''
SQL_GET_BLOCK_STATUS = '''
SELECT isinvalid
FROM blocks
WHERE projectid=:projectid
AND blockid = :blockid
AND vaultid = :vaultid
'''
SQL_GET_BLOCK_REF_COUNT = '''
SELECT count(*)
FROM fileblocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid = :blockid
'''
SQL_UPDATE_REF_TIME = '''
UPDATE blocks
SET reftime = strftime('%s', 'now')
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid = :blockid
'''
SQL_GET_REF_TIME = '''
SELECT reftime
FROM blocks
WHERE projectid = :projectid
AND vaultid = :vaultid
AND blockid = :blockid
'''
class SqliteStorageDriver(MetadataStorageDriver):
    """Metadata driver backed by an SQLite database.

    The concrete DB-API module is loaded dynamically from
    ``conf.metadata_driver.sqlite.db_module``; it must expose a
    ``Connection`` class that accepts the database file path.
    """

    def __init__(self):
        self._dbfile = conf.metadata_driver.sqlite.path
        # Load the driver module according to the configuration
        deuce.db_pack = importlib.import_module(
            conf.metadata_driver.sqlite.db_module)
        self._conn = getattr(deuce.db_pack, 'Connection')(self._dbfile)
        self._do_migrate()

    def _get_user_version(self):
        """Return the schema version stored in SQLite's user_version pragma."""
        res = self._conn.execute('pragma user_version')
        row = next(res)
        return row[0]

    def _set_user_version(self, version):
        # NOTE: for whatever reason, pragma's don't seem to
        # work with the built-in query formatter so
        # we just use string formatting here. This should be
        # OK since version is internally generated.
        self._conn.execute('pragma user_version=%d' % version)

    def _do_migrate(self):
        """Apply every schema migration between the stored version and
        CURRENT_DB_VERSION, then persist the new version."""
        db_ver = self._get_user_version()
        for ver in range(db_ver, CURRENT_DB_VERSION):
            for query in schemas[ver]:
                self._conn.execute(query)
            db_ver = ver + 1
        self._set_user_version(db_ver)

    def _determine_marker(self, marker):
        """Determines the default marker to use if
        the passed marker is None, empty string, etc
        """
        return marker or ''

    def create_vault(self, vault_id):
        """Creates a representation of a vault."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id
        }
        self._conn.execute(SQL_CREATE_VAULT, args)
        self._conn.commit()
        # TODO: check that one row was inserted
        return

    def delete_vault(self, vault_id):
        """Deletes the vault from metadata."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id
        }
        self._conn.execute(SQL_DELETE_VAULT, args)
        self._conn.commit()
        return

    def create_vaults_generator(self, marker=None, limit=None):
        """Return a list of vault IDs starting at *marker*, at most
        *limit* entries."""
        args = {
            'projectid': deuce.context.project_id,
            'marker': self._determine_marker(marker),
            'limit': self._determine_limit(limit)
        }
        res = self._conn.execute(SQL_GET_ALL_VAULT, args)
        return [row[0] for row in res]

    def get_vault_statistics(self, vault_id):
        """Return the statistics on the vault.

        :param vault_id: The ID of the vault to gather statistics for
        """
        res = {}
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id
        }

        def __stats_query(sql_statement, default_value):
            # Run a single-value aggregate query, falling back to the
            # default when no row or column comes back.
            result = self._conn.execute(sql_statement, args)
            try:
                row = next(result)
                return row[0]
            except StopIteration:  # pragma: no cover
                return default_value
            except IndexError:  # pragma: no cover
                return default_value

        def __stats_get_vault_file_count():
            return __stats_query(SQL_GET_COUNT_ALL_FILES, 0)

        def __stats_get_vault_block_count():
            return __stats_query(SQL_GET_COUNT_ALL_BLOCKS, 0)

        # Add any statistics regarding files
        res['files'] = {}
        res['files']['count'] = __stats_get_vault_file_count()

        # Add any statistics regarding blocks
        res['blocks'] = {}
        res['blocks']['count'] = __stats_get_vault_block_count()

        # Add information about bad blocks and bad files
        res['blocks']['bad'], res['files']['bad'] = \
            self.vault_health(vault_id)

        # Add any statistics specific to the Sqlite backend
        res['internal'] = {}
        return res

    def vault_health(self, vault_id):
        '''Returns the number of bad blocks and bad files associated
        with a vault'''
        args = dict(
            projectid=deuce.context.project_id,
            vaultid=vault_id,
        )
        res = self._conn.execute(SQL_GET_BAD_BLOCKS, args)
        bad_blocks = [row[0] for row in res]
        no_of_bad_blocks = len(bad_blocks)
        bad_files = set()
        for block_id in bad_blocks:
            args = dict(
                projectid=deuce.context.project_id,
                vaultid=vault_id,
                blockid=block_id,
            )
            result = self._conn.execute(SQL_GET_FILE_PER_BLOCK, args)
            bad_file = [row[0] for row in result]
            # NOTE(review): only the first file referencing each bad block
            # is recorded; confirm whether all referencing files should be
            # counted as bad.
            try:
                bad_files.add(bad_file[0])
            except IndexError:
                pass
        no_of_bad_files = len(bad_files)
        return (no_of_bad_blocks, no_of_bad_files)

    def create_file(self, vault_id, file_id):
        """Creates a new, empty file record with no blocks assigned."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id
        }
        self._conn.execute(SQL_CREATE_FILE, args)
        self._conn.commit()
        # TODO: check that one row was inserted
        return file_id

    def file_length(self, vault_id, file_id):
        """Retrieve the length of the file; 0 when the file is unknown."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id
        }
        res = self._conn.execute(SQL_GET_FILE_SIZE, args)
        try:
            row = next(res)
            return row[0]
        except StopIteration:
            return 0

    def get_block_storage_id(self, vault_id, block_id):
        """Retrieve storage id for a given block id; None when unknown."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        res = self._conn.execute(SQL_GET_STORAGE_ID, args)
        try:
            row = next(res)
            return str(row[0])
        except StopIteration:
            return None

    def get_block_metadata_id(self, vault_id, storage_id):
        """Retrieve block id for a given storage id; None when unknown."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'storageid': storage_id
        }
        res = self._conn.execute(SQL_GET_BLOCK_ID, args)
        try:
            row = next(res)
            return str(row[0])
        except StopIteration:
            return None

    def has_file(self, vault_id, file_id):
        """Return True when a file record exists."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id
        }
        res = self._conn.execute(SQL_GET_FILE, args)
        try:
            row = next(res)
            return True
        except StopIteration:
            return False

    def is_finalized(self, vault_id, file_id):
        """Return True when the file exists and its finalized flag is set."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id
        }
        res = self._conn.execute(SQL_GET_FILE, args)
        try:
            row = next(res)
            return row[0] == 1
        except StopIteration:
            return False

    def delete_file(self, vault_id, file_id):
        """Delete a file record and its block assignments, refreshing the
        reference time of the blocks it used first."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id
        }
        res = self._conn.execute(SQL_UPDATE_REF_TIME_BLOCKS_IN_FILE, args)
        self._conn.commit()
        res = self._conn.execute(SQL_DELETE_FILE, args)
        self._conn.commit()
        res = self._conn.execute(SQL_DELETE_FILE_BLOCKS_FOR_FILE, args)
        self._conn.commit()

    def finalize_file(self, vault_id, file_id, file_size=None):
        """Updates the files table to set a file to finalized and record
        its size. This function makes no assumptions about whether or not
        the file record actually exists.

        Raises GapError / OverlapError when the assigned blocks do not
        tile the [0, file_size) range exactly.
        """
        if file_size is None:
            file_size = 0
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id,
            'file_size': file_size
        }

        # Check for gaps and overlaps: blocks come back ordered by offset,
        # so each one must start exactly where the previous one ended.
        expected_offset = 0
        res = self._conn.execute(SQL_CREATE_FILEBLOCK_LIST, args)
        for blockid, offset, size in res:
            if offset == expected_offset:
                expected_offset += size
            elif offset < expected_offset:  # Overlap scenario
                raise OverlapError(deuce.context.project_id, vault_id, file_id,
                                   blockid, startpos=offset,
                                   endpos=expected_offset)
            else:
                raise GapError(deuce.context.project_id, vault_id, file_id,
                               startpos=expected_offset, endpos=offset)

        # Now we must check the very last block against the declared size.
        if file_size and file_size != expected_offset:
            if expected_offset < file_size:
                raise GapError(deuce.context.project_id, vault_id, file_id,
                               expected_offset, file_size)
            else:
                assert expected_offset > file_size
                # NOTE(review): the 4th positional arg is a block id in the
                # overlap branch above but file_size here — confirm against
                # OverlapError's signature.
                raise OverlapError(deuce.context.project_id, vault_id, file_id,
                                   file_size, startpos=file_size,
                                   endpos=expected_offset)

        res = self._conn.execute(SQL_FINALIZE_FILE, args)
        self._conn.commit()
        return None

    def get_block_data(self, vault_id, block_id):
        """Returns a dict with the blocksize for this block.

        Raises Exception when the block is unknown.
        """
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        res = self._conn.execute(SQL_GET_BLOCK, args)
        try:
            row = next(res)
        except StopIteration:
            raise Exception("No such block: {0}".format(block_id))
        retval = {}
        retval['blocksize'] = row[0]
        return retval

    def get_file_data(self, vault_id, file_id):
        """Returns a tuple representing data for this file.

        Raises Exception when the file is unknown.
        """
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id
        }
        res = self._conn.execute(SQL_GET_FILE, args)
        try:
            row = next(res)
        except StopIteration:
            raise Exception("No such file: {0}".format(file_id))
        return row

    def mark_block_as_bad(self, vault_id, block_id,):
        """Set the block's isinvalid flag."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        self._conn.execute(SQL_MARK_BLOCK_AS_BAD, args)
        self._conn.commit()

    @staticmethod
    def _block_exists(res, check_status):
        """Interpret a SQL_GET_BLOCK_STATUS result list.

        Returns False when no row exists, or — when check_status is set —
        when the block is marked invalid.
        """
        if len(res) == 0:
            return False
        # The result should contain exactly
        # one row and one column.
        assert len(res) == 1
        assert len(res[0]) == 1
        if check_status and res[0][0] == 1:
            return False
        return True

    def reset_block_status(self, vault_id, marker=None, limit=None):
        """Mark every block in the listed page as valid again.

        Returns the last block id as the next page marker when a full page
        was processed, otherwise None.
        """
        page_size = self._determine_limit(limit)
        blocks = self.create_block_generator(vault_id, marker, page_size)

        def mark_block_as_good(vault_id, block_id):
            args = {
                'projectid': deuce.context.project_id,
                'vaultid': vault_id,
                'blockid': block_id
            }
            self._conn.execute(SQL_MARK_BLOCK_AS_GOOD, args)
            self._conn.commit()

        for block in blocks:
            mark_block_as_good(vault_id, block)
        # A full page suggests more blocks may follow; hand back a marker.
        return blocks[-1] if len(blocks) == page_size else None

    def has_block(self, vault_id, block_id, check_status=False):
        """Return True when the block exists (and is valid, when
        check_status is set)."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        # This query should only ever return zero or 1 row, so
        # return that value here
        res = list(self._conn.execute(SQL_GET_BLOCK_STATUS, args))
        return SqliteStorageDriver._block_exists(res, check_status)

    def has_blocks(self, vault_id, block_ids, check_status=False):
        """Return the subset of block_ids that are missing (or invalid,
        when check_status is set)."""
        results = []
        for block_id in block_ids:
            args = {
                'projectid': deuce.context.project_id,
                'vaultid': vault_id,
                'blockid': block_id
            }
            res = list(self._conn.execute(SQL_GET_BLOCK_STATUS, args))
            if SqliteStorageDriver._block_exists(res, check_status) is False:
                results.append(block_id)
        return results

    def create_block_generator(self, vault_id, marker=None,
                               limit=None):
        """Return a page of block IDs for the vault."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'limit': self._determine_limit(limit),
            'marker': self._determine_marker(marker)
        }
        res = self._conn.execute(SQL_GET_ALL_BLOCKS, args)
        return [row[0] for row in res]

    def create_file_generator(self, vault_id,
                              marker=None, limit=None, finalized=True):
        """Return a page of file IDs for the vault, filtered by the
        finalized flag."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'limit': self._determine_limit(limit),
            'marker': self._determine_marker(marker),
            'finalized': finalized
        }
        res = self._conn.execute(SQL_GET_ALL_FILES, args)
        return [row[0] for row in res]

    def create_file_block_generator(self, vault_id, file_id,
                                    offset=None, limit=None):
        """Return (blockid, offset) pairs for a file; paged when *limit*
        is given, otherwise the full list."""
        args = {
            'fileid': file_id,
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
        }
        if limit is None:
            query = SQL_GET_ALL_FILE_BLOCKS
        else:
            query = SQL_GET_FILE_BLOCKS
            args.update({
                'limit': self._determine_limit(limit),
                'offset': offset or 0
            })
        query_res = self._conn.execute(query, args)
        return [(row[0], row[1]) for row in query_res]

    def assign_block(self, vault_id, file_id, block_id, offset):
        """Assign a block to a file at *offset* and touch the block's
        reference time."""
        # TODO(jdp): tweak this to support multiple assignments
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'fileid': file_id,
            'blockid': block_id,
            'offset': offset
        }
        self._conn.execute(SQL_ASSIGN_BLOCK_TO_FILE, args)
        # SQL_UPDATE_REF_TIME only takes project/vault/block ids.
        del args['fileid']
        del args['offset']
        self._conn.execute(SQL_UPDATE_REF_TIME, args)
        self._conn.commit()

    def assign_blocks(self, vault_id, file_id, block_ids, offsets):
        """Assign several blocks to a file; block_ids and offsets are
        iterated in lockstep."""
        # TODO(jdp): tweak this to support multiple assignments
        for block_id, offset in zip(block_ids, offsets):
            args = {
                'projectid': deuce.context.project_id,
                'vaultid': vault_id,
                'fileid': file_id,
                'blockid': block_id,
                'offset': offset
            }
            self._conn.execute(SQL_ASSIGN_BLOCK_TO_FILE, args)
            del args['fileid']
            del args['offset']
            self._conn.execute(SQL_UPDATE_REF_TIME, args)
            self._conn.commit()

    def register_block(self, vault_id, block_id, storage_id, blocksize):
        """Record block metadata unless a valid copy is already registered."""
        if not self.has_block(vault_id, block_id, check_status=True):
            args = {
                'projectid': deuce.context.project_id,
                'vaultid': vault_id,
                'blockid': block_id,
                'blocksize': int(blocksize),
                'storageid': storage_id
            }
            self._conn.execute(SQL_REGISTER_BLOCK, args)
            self._conn.commit()

    def unregister_block(self, vault_id, block_id):
        """Delete a block's metadata; refuses while files still reference it."""
        self._require_no_block_refs(vault_id, block_id)
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        self._conn.execute(SQL_UNREGISTER_BLOCK, args)
        self._conn.commit()

    def get_block_ref_count(self, vault_id, block_id):
        """Return the number of file assignments referencing the block.

        Fixed: the original ``count if not None else 0`` always returned
        ``count`` because ``not None`` is unconditionally True.
        """
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        query_res = self._conn.execute(SQL_GET_BLOCK_REF_COUNT, args)
        count = next(query_res)[0]
        return count if count is not None else 0

    def get_block_ref_modified(self, vault_id, block_id):
        """Return the block's last reference time, or 0 when unknown."""
        args = {
            'projectid': deuce.context.project_id,
            'vaultid': vault_id,
            'blockid': block_id
        }
        query_res = self._conn.execute(SQL_GET_REF_TIME, args)
        try:
            return next(query_res)[0]
        except (StopIteration, IndexError):
            # Narrowed from a bare except: only "no row / no column" means
            # an unknown block; anything else should propagate.
            return 0

    def get_health(self):
        """Return a simple, human-readable status report for this driver."""
        try:
            # TODO: Collect more system statistics.
            return ["sqlite is active."]
        except Exception:  # pragma: no cover
            return ["sqlite is not active."]
| {
"repo_name": "rackerlabs/deuce",
"path": "deuce/drivers/sqlite/sqlitemetadatadriver.py",
"copies": "1",
"size": "24162",
"license": "apache-2.0",
"hash": -5310298507624648000,
"line_mean": 25.9064587973,
"line_max": 79,
"alpha_frac": 0.5645641917,
"autogenerated": false,
"ratio": 3.7547785547785546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.981825631217258,
"avg_score": 0.0002172868611949995,
"num_lines": 898
} |
from functools import lru_cache
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import ManyToOneRel
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from enumfields.fields import EnumIntegerField
from democracy.enums import Commenting, CommentingMapTools
ORDERING_HELP = _("The ordering position for this object. Objects with smaller numbers appear first.")


def generate_id():
    """Return a random 32-character string, used as a CharField primary key."""
    key_length = 32
    return get_random_string(key_length)
class BaseModelManager(models.Manager):
    """Default manager for BaseModel subclasses.

    The default queryset hides soft-deleted rows; the extra methods widen
    or narrow that view.
    """

    def get_queryset(self):
        # Soft-deleted objects are invisible by default.
        return super().get_queryset().exclude(deleted=True)

    def public(self, *args, **kwargs):
        """Non-deleted objects that are also published."""
        return self.get_queryset().exclude(published=False).filter(*args, **kwargs)

    def with_unpublished(self, *args, **kwargs):
        """Non-deleted objects, published or not."""
        return self.get_queryset().filter(*args, **kwargs)

    def deleted(self, *args, **kwargs):
        """Only the soft-deleted objects."""
        return super().get_queryset().filter(deleted=True).filter(*args, **kwargs)

    def everything(self, *args, **kwargs):
        """All objects, deleted or not, published or not."""
        return super().get_queryset().filter(*args, **kwargs)
class BaseModel(models.Model):
    """Abstract base model with audit stamps, a publish flag and soft deletion."""

    created_at = models.DateTimeField(
        verbose_name=_('time of creation'), default=timezone.now, editable=False, db_index=True
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_('created by'),
        null=True, blank=True, related_name="%(class)s_created",
        editable=False, on_delete=models.SET_NULL
    )
    modified_at = models.DateTimeField(
        verbose_name=_('time of last modification'), default=timezone.now, editable=False
    )
    modified_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_('last modified by'),
        null=True, blank=True, related_name="%(class)s_modified",
        editable=False, on_delete=models.SET_NULL
    )
    published = models.BooleanField(verbose_name=_('public'), default=True, db_index=True)
    deleted = models.BooleanField(verbose_name=_('deleted'), default=False, db_index=True, editable=False)

    # Default manager hides soft-deleted rows (see BaseModelManager).
    objects = BaseModelManager()

    def save(self, *args, **kwargs):
        """Save the instance.

        Generates a random pk for CharField-keyed models on first save and
        bumps ``modified_at`` unless ``no_modified_at_update=True`` is passed.
        """
        pk_type = self._meta.pk.get_internal_type()
        if pk_type == 'CharField':
            # String-keyed models get a random 32-char id on first save.
            if not self.pk:
                self.pk = generate_id()
        elif pk_type == 'AutoField':
            pass
        else:  # pragma: no cover
            raise Exception('Unsupported primary key field: %s' % pk_type)
        if not kwargs.pop("no_modified_at_update", False):
            # Useful for importing, etc.
            self.modified_at = timezone.now()
        super().save(*args, **kwargs)

    def soft_delete(self, using=None):
        # Flag-based deletion; the row stays in the database.
        self.deleted = True
        self.save(update_fields=("deleted",), using=using)

    def undelete(self, using=None):
        self.deleted = False
        self.save(update_fields=("deleted",), using=using)

    def delete(self, using=None):
        # Hard deletion is deliberately disabled; use soft_delete() instead.
        raise NotImplementedError("This model does not support hard deletion")

    @classmethod
    @lru_cache()
    def find_subclass(cls, parent_model):
        """
        Find the subclass that's used with the given `parent_model`.

        This is only useful for models that are related to another model, such
        as Comments or Images.

        NOTE(review): the lru_cache key includes parent_model, which the
        docstring says may be an instance — cached instances would then be
        kept alive for the process lifetime; confirm callers pass classes.

        :param parent_model: A model (class or instance)
        :return: The model subclass, or None
        :rtype: class|None
        """
        for field in parent_model._meta.get_fields():  # pragma: no branch
            if isinstance(field, ManyToOneRel) and issubclass(field.related_model, cls):  # pragma: no branch
                return field.related_model

    class Meta:
        abstract = True
class StringIdBaseModel(BaseModel):
    """BaseModel variant keyed by a random 32-character string id.

    The id is filled in by BaseModel.save() via generate_id().
    """

    id = models.CharField(
        verbose_name=_('identifier'),
        primary_key=True,
        max_length=32,
        editable=False
    )

    class Meta:
        abstract = True
class Commentable(models.Model):
    """
    Mixin for models which can be commented.
    """

    # Denormalized comment count, kept in sync by recache_n_comments().
    n_comments = models.IntegerField(
        verbose_name=_('number of comments'),
        blank=True,
        default=0,
        editable=False,
        db_index=True
    )
    commenting = EnumIntegerField(Commenting, verbose_name=_('commenting'), default=Commenting.NONE)
    commenting_map_tools = EnumIntegerField(CommentingMapTools, verbose_name=_('commenting_map_tools'),
                                            default=CommentingMapTools.NONE)
    # Voting reuses the Commenting enum for its access levels.
    voting = EnumIntegerField(Commenting, verbose_name=_('voting'), default=Commenting.REGISTERED)

    def recache_n_comments(self):
        """Recompute and persist n_comments from the comments relation."""
        new_n_comments = self.comments.count()
        if new_n_comments != self.n_comments:
            self.n_comments = new_n_comments
            self.save(update_fields=("n_comments",))
        # if commentable has a parent hearing, recache the hearing comment count
        if hasattr(self, 'hearing'):
            self.hearing.recache_n_comments()

    def check_commenting(self, request):
        """
        Check whether the given request (HTTP or DRF) is allowed to comment on this Commentable.

        If commenting is not allowed, the function must raise a ValidationError.
        It must never return a value other than None.
        """
        is_authenticated = request.user.is_authenticated
        if self.commenting == Commenting.NONE:
            raise ValidationError(_("%s does not allow commenting") % self, code="commenting_none")
        elif self.commenting == Commenting.REGISTERED:
            if not is_authenticated:
                raise ValidationError(_("%s does not allow anonymous commenting") % self, code="commenting_registered")
        elif self.commenting == Commenting.STRONG:
            if not is_authenticated:
                raise ValidationError(_("%s requires strong authentication for commenting") % self,
                                      code="commenting_registered_strong")
            # STRONG accepts either strong authentication or membership in
            # a default organization.
            elif not request.user.has_strong_auth and not request.user.get_default_organization():
                raise ValidationError(_("%s requires strong authentication for commenting") % self,
                                      code="commenting_registered_strong")
        elif self.commenting == Commenting.OPEN:
            return
        else:  # pragma: no cover
            raise NotImplementedError("Not implemented")

    def check_voting(self, request):
        """
        Check whether the given request (HTTP or DRF) is allowed to vote on this Commentable.

        If voting is not allowed, the function must raise a ValidationError.
        It must never return a value other than None.
        """
        is_authenticated = request.user.is_authenticated
        if self.voting == Commenting.NONE:
            raise ValidationError(_("%s does not allow voting") % self, code="voting_none")
        elif self.voting == Commenting.REGISTERED:
            if not is_authenticated:
                raise ValidationError(_("%s does not allow anonymous voting") % self, code="voting_registered")
        elif self.voting == Commenting.STRONG:
            if not is_authenticated:
                raise ValidationError(_("%s requires strong authentication for voting") % self,
                                      code="voting_registered_strong")
            # Same dual condition as check_commenting's STRONG branch.
            elif not request.user.has_strong_auth and not request.user.get_default_organization():
                raise ValidationError(_("%s requires strong authentication for voting") % self,
                                      code="voting_registered_strong")
        elif self.voting == Commenting.OPEN:
            return
        else:  # pragma: no cover
            raise NotImplementedError("Not implemented")

    class Meta:
        abstract = True
| {
"repo_name": "City-of-Helsinki/kerrokantasi",
"path": "democracy/models/base.py",
"copies": "1",
"size": "7831",
"license": "mit",
"hash": -1523579263042091500,
"line_mean": 39.5751295337,
"line_max": 119,
"alpha_frac": 0.6368279913,
"autogenerated": false,
"ratio": 4.360244988864142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013421674993147493,
"num_lines": 193
} |
from functools import lru_cache
from django.contrib.auth import get_permission_codename
from django.db.models import Q
from froide.team.models import Team
# Map access verbs to Django permission codename prefixes
# ("read" -> "view", "write" -> "change"); other verbs pass through as-is.
AUTH_MAPPING = {
    'read': 'view',
    'write': 'change',
}
def check_permission(obj, request, verb):
    """Return True when a staff user holds the Django permission for *verb*.

    Checks the model-level permission first, then the object-level one.
    Non-staff users are rejected outright.
    """
    user = request.user
    if not user.is_staff:
        return False
    capability = AUTH_MAPPING.get(verb, verb)
    opts = obj._meta
    perm = "%s.%s" % (opts.app_label, get_permission_codename(capability, opts))
    return user.has_perm(perm) or user.has_perm(perm, obj=obj)
def has_authenticated_access(obj, request, verb='write', scope=None):
    """Return True when the authenticated requester may perform *verb* on *obj*.

    Grants access, in order, to: the object's owner, superusers, holders of
    the matching Django permission, and members of the object's team.
    An OAuth token on the request must carry *scope* (when given).
    """
    user = request.user
    if not user.is_authenticated:
        # No authentication, no access
        return False
    # OAuth token
    token = getattr(request, 'auth', None)
    if token and (not scope or not token.is_valid([scope])):
        return False
    if hasattr(obj, 'user') and obj.user_id == user.id:
        # The object owner always has the capability
        return True
    return (
        user.is_superuser
        or check_permission(obj, request, verb)
        or bool(hasattr(obj, 'team') and obj.team and obj.team.can_do(verb, user))
    )
@lru_cache()
def can_read_object(obj, request=None):
    """Return True when *obj* is public or the requester has read access."""
    if hasattr(obj, 'is_public'):
        if obj.is_public():
            return True
    if request is None:
        return False
    return has_authenticated_access(obj, request, verb='read')
@lru_cache()
def can_write_object(obj, request):
    """Return True when the requester has write access to *obj*."""
    return has_authenticated_access(obj, request, verb='write')
@lru_cache()
def can_manage_object(obj, request):
    """Return True when the requester may manage *obj*.

    Team owner permission.
    """
    return has_authenticated_access(obj, request, verb='manage')
# Dispatch table for can_access_object(); the values are lru_cached, which
# is why clear_lru_caches() iterates this mapping.
ACCESS_MAPPING = {
    'read': can_read_object,
    'write': can_write_object,
    'manage': can_manage_object,
}
def can_access_object(verb, obj, request):
    """Dispatch an access check by verb ('read'/'write'/'manage').

    Raises ValueError for any other verb.
    """
    try:
        checker = ACCESS_MAPPING[verb]
    except KeyError:
        raise ValueError('Invalid auth verb')
    return checker(obj, request)
def get_read_queryset(qs, request, has_team=False, public_field=None,
                      scope=None):
    """Narrow *qs* to the rows the requester may read.

    Unauthenticated (or wrongly-scoped OAuth) requests see only the rows
    flagged public via *public_field* (or nothing, when no such field).
    Superusers and staff with the model's "view" permission see everything;
    everyone else additionally sees their own rows and, when *has_team*,
    their teams' rows.
    """
    user = request.user
    filters = None
    if public_field is not None:
        filters = Q(**{public_field: True})
        result_qs = qs.filter(filters)
    else:
        result_qs = qs.none()
    if not user.is_authenticated:
        return result_qs
    # OAuth token
    token = getattr(request, 'auth', None)
    if token and (not scope or not token.is_valid([scope])):
        # API access, but no scope
        return result_qs
    if user.is_superuser:
        return qs
    model = qs.model
    opts = model._meta
    codename = get_permission_codename('view', opts)
    if user.is_staff and user.has_perm("%s.%s" % (opts.app_label, codename)):
        return qs
    teams = None
    if has_team:
        teams = Team.objects.get_for_user(user)
    # Combine the public filter (if any) with ownership/team membership.
    user_filter = get_user_filter(request, teams=teams)
    if filters is None:
        filters = user_filter
    else:
        filters |= user_filter
    return qs.filter(filters)
def get_write_queryset(qs, request, has_team=False,
                       user_write_filter=None, scope=None):
    """Narrow *qs* to the rows the requester may modify.

    Unauthenticated (or wrongly-scoped OAuth) requests get an empty
    queryset. Superusers and staff with the model's "change" permission get
    everything; everyone else gets their own rows, *user_write_filter*
    matches, and — when *has_team* — rows of teams they edit or own.
    """
    user = request.user
    if not user.is_authenticated:
        return qs.none()
    # OAuth token
    token = getattr(request, 'auth', None)
    if token and (not scope or not token.is_valid([scope])):
        # API access, but no scope
        return qs.none()
    if user.is_superuser:
        return qs
    model = qs.model
    opts = model._meta
    codename = get_permission_codename('change', opts)
    if user.is_staff and user.has_perm("%s.%s" % (opts.app_label, codename)):
        return qs
    filters = None
    if user_write_filter is not None:
        filters = user_write_filter
    teams = None
    if has_team:
        # Writing requires editor/owner team membership, not mere membership.
        teams = Team.objects.get_editor_owner_teams(user)
    user_filter = get_user_filter(request, teams=teams)
    if filters is None:
        filters = user_filter
    else:
        filters |= user_filter
    return qs.filter(filters)
def get_user_filter(request, teams=None):
    """Build a Q matching rows owned by the requester or any of *teams*."""
    condition = Q(user=request.user)
    if teams:
        # or their team
        condition |= Q(team__in=teams)
    return condition
def clear_lru_caches():
    """Reset every cached access checker registered in ACCESS_MAPPING."""
    for cached_checker in ACCESS_MAPPING.values():
        cached_checker.cache_clear()
| {
"repo_name": "stefanw/froide",
"path": "froide/helper/auth.py",
"copies": "1",
"size": "4514",
"license": "mit",
"hash": -6740018870848722000,
"line_mean": 23.6666666667,
"line_max": 77,
"alpha_frac": 0.6240584847,
"autogenerated": false,
"ratio": 3.6848979591836737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808956443883674,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from wagtail.admin import messages
from wagtail.admin.edit_handlers import (
ObjectList, TabbedInterface, extract_panel_definitions_from_model_class)
from wagtail.core.models import Site
from .forms import SiteSwitchForm
from .permissions import user_can_edit_setting_type
from .registry import registry
def get_model_from_url_params(app_name, model_name):
    """
    retrieve a content type from an app_name / model_name combo.
    Throw Http404 if not a valid setting type
    """
    model = registry.get_by_natural_key(app_name, model_name)
    if model is not None:
        return model
    raise Http404
@lru_cache()
def get_setting_edit_handler(model):
    """Build (and cache) the edit handler for a settings model.

    Uses the model's own ``edit_handler`` when declared, otherwise derives
    panels from the model class (excluding the ``site`` field).
    """
    if hasattr(model, 'edit_handler'):
        handler = model.edit_handler
    else:
        handler = ObjectList(
            extract_panel_definitions_from_model_class(model, ['site']))
    return handler.bind_to(model=model)
def edit_current_site(request, app_name, model_name):
    """Redirect to the settings edit view for the request's site.

    Falls back to the first site when the request matches none; errors out
    to the admin home when no site exists at all.
    """
    site = Site.find_for_request(request) or Site.objects.first()
    if site:
        return redirect('wagtailsettings:edit', app_name, model_name, site.pk)
    messages.error(request, _("This setting could not be opened because there is no site defined."))
    return redirect('wagtailadmin_home')
def edit(request, app_name, model_name, site_pk):
    """Render and process the edit form for a site's settings instance.

    Resolves the setting model from the URL, enforces the per-model edit
    permission, then handles the auto-generated model form.
    """
    model = get_model_from_url_params(app_name, model_name)
    if not user_can_edit_setting_type(request.user, model):
        raise PermissionDenied
    site = get_object_or_404(Site, pk=site_pk)
    setting_type_name = model._meta.verbose_name
    # Settings are per-site singletons; for_site() yields the instance to edit.
    instance = model.for_site(site)
    edit_handler = get_setting_edit_handler(model)
    edit_handler = edit_handler.bind_to(instance=instance, request=request)
    form_class = edit_handler.get_form_class()
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            form.save()
            messages.success(
                request,
                _("%(setting_type)s updated.") % {
                    'setting_type': capfirst(setting_type_name),
                    'instance': instance
                }
            )
            # Redirect-after-POST back to the same edit view.
            return redirect('wagtailsettings:edit', app_name, model_name, site.pk)
        else:
            messages.validation_error(
                request, _("The setting could not be saved due to errors."), form
            )
    else:
        form = form_class(instance=instance)
    edit_handler = edit_handler.bind_to(form=form)
    # Show a site switcher form if there are multiple sites
    site_switcher = None
    if Site.objects.count() > 1:
        site_switcher = SiteSwitchForm(site, model)
    return TemplateResponse(request, 'wagtailsettings/edit.html', {
        'opts': model._meta,
        'setting_type_name': setting_type_name,
        'instance': instance,
        'edit_handler': edit_handler,
        'form': form,
        'site': site,
        'site_switcher': site_switcher,
        'tabbed': isinstance(edit_handler, TabbedInterface),
    })
| {
"repo_name": "zerolab/wagtail",
"path": "wagtail/contrib/settings/views.py",
"copies": "10",
"size": "3579",
"license": "bsd-3-clause",
"hash": 4990418021477917000,
"line_mean": 34.0882352941,
"line_max": 104,
"alpha_frac": 0.666387259,
"autogenerated": false,
"ratio": 3.8986928104575163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044120665982640345,
"num_lines": 102
} |
from functools import lru_cache
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from wagtail.admin import messages
from wagtail.admin.edit_handlers import (
ObjectList, TabbedInterface, extract_panel_definitions_from_model_class)
from wagtail.core.models import Site
from .forms import SiteSwitchForm
from .permissions import user_can_edit_setting_type
from .registry import registry
def get_model_from_url_params(app_name, model_name):
    """
    retrieve a content type from an app_name / model_name combo.
    Throw Http404 if not a valid setting type
    """
    model = registry.get_by_natural_key(app_name, model_name)
    if model is not None:
        return model
    raise Http404
@lru_cache()
def get_setting_edit_handler(model):
    """Return the cached edit handler for a settings model.

    Prefers a model-declared ``edit_handler``; otherwise derives panels from
    the model class, excluding the ``site`` field.
    """
    if hasattr(model, 'edit_handler'):
        handler = model.edit_handler
    else:
        handler = ObjectList(
            extract_panel_definitions_from_model_class(model, ['site']))
    return handler.bind_to(model=model)
def edit_current_site(request, app_name, model_name):
    """Redirect to the settings edit view for the current site.

    Uses the site attached to the request, or the first site when the
    request has none; errors out to admin home when no site exists.
    """
    site = request.site or Site.objects.first()
    if site:
        return redirect('wagtailsettings:edit', app_name, model_name, site.pk)
    messages.error(request, _("This setting could not be opened because there is no site defined."))
    return redirect('wagtailadmin_home')
def edit(request, app_name, model_name, site_pk):
    """Render and process the edit form for a site's settings instance.

    Resolves the setting model from the URL, enforces the per-model edit
    permission, then handles the auto-generated model form.
    """
    model = get_model_from_url_params(app_name, model_name)
    if not user_can_edit_setting_type(request.user, model):
        raise PermissionDenied
    site = get_object_or_404(Site, pk=site_pk)
    setting_type_name = model._meta.verbose_name
    # Settings are per-site singletons; for_site() yields the instance to edit.
    instance = model.for_site(site)
    edit_handler = get_setting_edit_handler(model)
    edit_handler = edit_handler.bind_to(instance=instance, request=request)
    form_class = edit_handler.get_form_class()
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            form.save()
            messages.success(
                request,
                _("%(setting_type)s updated.") % {
                    'setting_type': capfirst(setting_type_name),
                    'instance': instance
                }
            )
            # Redirect-after-POST back to the same edit view.
            return redirect('wagtailsettings:edit', app_name, model_name, site.pk)
        else:
            messages.validation_error(
                request, _("The setting could not be saved due to errors."), form
            )
    else:
        form = form_class(instance=instance)
    edit_handler = edit_handler.bind_to(form=form)
    # Show a site switcher form if there are multiple sites
    site_switcher = None
    if Site.objects.count() > 1:
        site_switcher = SiteSwitchForm(site, model)
    return render(request, 'wagtailsettings/edit.html', {
        'opts': model._meta,
        'setting_type_name': setting_type_name,
        'instance': instance,
        'edit_handler': edit_handler,
        'form': form,
        'site': site,
        'site_switcher': site_switcher,
        'tabbed': isinstance(edit_handler, TabbedInterface),
    })
| {
"repo_name": "mixxorz/wagtail",
"path": "wagtail/contrib/settings/views.py",
"copies": "3",
"size": "3474",
"license": "bsd-3-clause",
"hash": 3138329064386283500,
"line_mean": 33.74,
"line_max": 104,
"alpha_frac": 0.6614853195,
"autogenerated": false,
"ratio": 3.877232142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00045003079302293153,
"num_lines": 100
} |
from functools import lru_cache
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from events.models import Event, Keyword, KeywordSet, DataSource
# Definition of the "Helsinki audiences" KeywordSet created by this command.
HELSINKI_KEYWORD_SET_DATA = {
    'id': 'helsinki:audiences',
    'name_en': 'Helsinki audiences',
    'name_fi': 'Helsinki kohderyhmät',
    'name_sv': 'Helsingfors invånargrupper',
    'data_source_id': 'helsinki',
    'usage': KeywordSet.AUDIENCE,
}
# keyword id mapping from hel.fi to YSO
KEYWORD_MAPPING = {
    'helfi:1': ['yso:p4354', 'yso:p13050'],  # lapset ja lapsiperheet -> lapset (ikään liittyvä rooli) & lapsiperheet
    'helfi:2': ['yso:p11617'],  # nuoret -> nuoret (ikään liittyvä rooli)
    'helfi:3': ['yso:p6165'],  # maahanmuuttajat -> maahanmuuttajat
    'helfi:4': ['yso:p7179'],  # vammaiset -> vammaiset
    'helfi:5': ['yso:p2433'],  # vanhukset -> ikääntyneet
    'helfi:6': ['yso:p3128'],  # yritykset -> yritykset
    'helfi:7': ['yso:p1393'],  # yhdistykset -> järjestöt
}
# Existing YSO keywords treated as SOTE (social & health care) audiences.
YSO_SOTE_KEYWORD_IDS = [
    'yso:p12297',  # mielenterveyspotilaat
    'yso:p23886',  # päihdekeskuskuntoutujat
]
# New SOTE audience keywords that have no YSO counterpart; created locally.
NEW_SOTE_KEYWORDS_DATA = [
    {
        'id': 'helsinki:aflfbatkwe',
        'name_fi': 'omaishoitoperheet',
        'data_source_id': 'helsinki',
    },
    {
        'id': 'helsinki:aflfbat76e',
        'name_fi': 'palvelukeskuskortti',
        'data_source_id': 'helsinki',
    }
]
class Command(BaseCommand):
    help = "Creates SOTE keywords and Helsinki audience keyword set and adds YSO audience keywords to events."

    def get_keyword_obj(self, keyword_id):
        """Return the ``Keyword`` with *keyword_id*, memoized per command run.

        Raises:
            CommandError: if no keyword with that id exists.
        """
        # Memoize on the instance instead of functools.lru_cache: lru_cache
        # on a method keys the cache on `self` and keeps the instance alive
        # for the cache's lifetime (ruff B019).
        cache = self.__dict__.setdefault('_keyword_cache', {})
        if keyword_id not in cache:
            try:
                cache[keyword_id] = Keyword.objects.get(id=keyword_id)
            except Keyword.DoesNotExist:
                raise CommandError('keyword "%s" does not exist' % keyword_id)
        return cache[keyword_id]

    @transaction.atomic()
    def create_sote_keywords(self):
        """Create (or update) the new SOTE keywords from NEW_SOTE_KEYWORDS_DATA."""
        self.stdout.write('creating new SOTE keywords...')
        for new_keyword_data in NEW_SOTE_KEYWORDS_DATA:
            # update_or_create here returns a Keyword; the previous local
            # name `keyword_set` was misleading.
            keyword, created = Keyword.objects.update_or_create(
                id=new_keyword_data['id'],
                defaults=new_keyword_data
            )
            if created:
                self.stdout.write('created keyword %s (%s)' % (new_keyword_data['name_fi'], new_keyword_data['id']))
            else:
                self.stdout.write('keyword %s (%s) already exist' % (new_keyword_data['name_fi'],
                                                                     new_keyword_data['id']))

    @transaction.atomic()
    def create_helsinki_audiences_keyword_set(self):
        """Create the Helsinki audiences KeywordSet and attach all audience keywords."""
        self.stdout.write('creating Helsinki audiences keyword set...')
        # create the set itself
        keyword_set, created = KeywordSet.objects.update_or_create(
            id=HELSINKI_KEYWORD_SET_DATA['id'],
            defaults=HELSINKI_KEYWORD_SET_DATA
        )
        if created:
            self.stdout.write('created keyword set "%s"' % HELSINKI_KEYWORD_SET_DATA['id'])
        else:
            self.stdout.write('keyword set "%s" already exist' % HELSINKI_KEYWORD_SET_DATA['id'])
        # flatten YSO keyword IDs
        yso_keyword_ids = [val for sublist in KEYWORD_MAPPING.values() for val in sublist]
        # keywords to add to the set = YSO keywords corresponding to hel.fi + YSO SOTE keywords + new SOTE keywords
        keyword_ids = yso_keyword_ids + YSO_SOTE_KEYWORD_IDS + [kw['id'] for kw in NEW_SOTE_KEYWORDS_DATA]
        # add the keywords to the set
        existing_keywords = set(keyword_set.keywords.all())
        for keyword_id in keyword_ids:
            keyword = self.get_keyword_obj(keyword_id)
            if keyword not in existing_keywords:
                keyword_set.keywords.add(keyword)
                existing_keywords.add(keyword)
                self.stdout.write('added %s (%s) to the keyword set' % (keyword.name, keyword_id))

    @transaction.atomic()
    def add_yso_audience_keywords_to_events(self):
        """Add the YSO equivalents of each event's hel.fi audience keywords."""
        self.stdout.write('adding YSO audience keywords to events...')
        for event in Event.objects.exclude(audience__isnull=True).prefetch_related('audience'):
            # Track membership in a set: re-checking `in event.audience.all()`
            # after .add() consults the stale prefetch cache, so the same YSO
            # keyword mapped from two hel.fi audiences could be added (and
            # logged) twice.
            existing = set(event.audience.all())
            for audience in event.audience.all():
                # if current audience is a valid hel.fi audience keyword, iterate YSO keywords corresponding to it
                for yso_keyword_id in KEYWORD_MAPPING.get(audience.id, []):
                    yso_keyword_obj = self.get_keyword_obj(yso_keyword_id)
                    if yso_keyword_obj not in existing:
                        event.audience.add(yso_keyword_obj)
                        existing.add(yso_keyword_obj)
                        self.stdout.write('added %s (%s) to %s' % (yso_keyword_obj, yso_keyword_id, event))

    def handle(self, *args, **options):
        """Entry point: ensure the data source exists, then run all three steps."""
        # Helsinki data source must be created if missing. Note that it is not necessarily the system data source.
        # If we are creating it, it *may* still be the system data source, so it must be user editable!
        helsinki_data_source_defaults = {'user_editable': True}
        DataSource.objects.get_or_create(id=HELSINKI_KEYWORD_SET_DATA['data_source_id'],
                                         defaults=helsinki_data_source_defaults)
        self.create_sote_keywords()
        self.create_helsinki_audiences_keyword_set()
        self.add_yso_audience_keywords_to_events()
        self.stdout.write('all done')
| {
"repo_name": "City-of-Helsinki/linkedevents",
"path": "events/management/commands/add_helsinki_audience.py",
"copies": "1",
"size": "5368",
"license": "mit",
"hash": -7952355949957708000,
"line_mean": 41.1653543307,
"line_max": 117,
"alpha_frac": 0.6162464986,
"autogenerated": false,
"ratio": 3.3385286783042396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44547751769042393,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from events.models import Event, Keyword, KeywordSet
# Definition of the "Helsinki audiences" KeywordSet created by this command.
HELSINKI_KEYWORD_SET_DATA = {
    'id': 'helsinki:audiences',
    'name_en': 'Helsinki audiences',
    'name_fi': 'Helsinki kohderyhmät',
    'name_sv': 'Helsingfors invånargrupper',
    'data_source_id': 'helsinki',
    'usage': KeywordSet.AUDIENCE,
}
# keyword id mapping from hel.fi to YSO
KEYWORD_MAPPING = {
    'helfi:1': ['yso:p4354', 'yso:p13050'],  # lapset ja lapsiperheet -> lapset (ikään liittyvä rooli) & lapsiperheet
    'helfi:2': ['yso:p11617'],  # nuoret -> nuoret (ikään liittyvä rooli)
    'helfi:3': ['yso:p6165'],  # maahanmuuttajat -> maahanmuuttajat
    'helfi:4': ['yso:p7179'],  # vammaiset -> vammaiset
    'helfi:5': ['yso:p2434'],  # vanhukset -> vanhukset
    'helfi:6': ['yso:p3128'],  # yritykset -> yritykset
    'helfi:7': ['yso:p1393'],  # yhdistykset -> järjestöt
}
# Existing YSO keywords treated as SOTE (social & health care) audiences.
YSO_SOTE_KEYWORD_IDS = [
    'yso:p12297',  # mielenterveyspotilaat
    'yso:p23886',  # päihdekeskuskuntoutujat
]
# New SOTE audience keywords that have no YSO counterpart; created locally.
NEW_SOTE_KEYWORDS_DATA = [
    {
        'id': 'helsinki:aflfbatkwe',
        'name_fi': 'omaishoitoperheet',
        'data_source_id': 'helsinki',
    },
    {
        'id': 'helsinki:aflfbat76e',
        'name_fi': 'palvelukeskuskortti',
        'data_source_id': 'helsinki',
    }
]
class Command(BaseCommand):
    help = "Creates SOTE keywords and Helsinki audience keyword set and adds YSO audience keywords to events."

    def get_keyword_obj(self, keyword_id):
        """Return the ``Keyword`` with *keyword_id*, memoized per command run.

        Raises:
            CommandError: if no keyword with that id exists.
        """
        # Memoize on the instance instead of functools.lru_cache: lru_cache
        # on a method keys the cache on `self` and keeps the instance alive
        # for the cache's lifetime (ruff B019).
        cache = self.__dict__.setdefault('_keyword_cache', {})
        if keyword_id not in cache:
            try:
                cache[keyword_id] = Keyword.objects.get(id=keyword_id)
            except Keyword.DoesNotExist:
                raise CommandError('keyword "%s" does not exist' % keyword_id)
        return cache[keyword_id]

    @transaction.atomic()
    def create_sote_keywords(self):
        """Create (or update) the new SOTE keywords from NEW_SOTE_KEYWORDS_DATA."""
        self.stdout.write('creating new SOTE keywords...')
        for new_keyword_data in NEW_SOTE_KEYWORDS_DATA:
            # update_or_create here returns a Keyword; the previous local
            # name `keyword_set` was misleading.
            keyword, created = Keyword.objects.update_or_create(
                id=new_keyword_data['id'],
                defaults=new_keyword_data
            )
            if created:
                self.stdout.write('created keyword %s (%s)' % (new_keyword_data['name_fi'], new_keyword_data['id']))
            else:
                self.stdout.write('keyword %s (%s) already exist' % (new_keyword_data['name_fi'],
                                                                     new_keyword_data['id']))

    @transaction.atomic()
    def create_helsinki_audiences_keyword_set(self):
        """Create the Helsinki audiences KeywordSet and attach all audience keywords."""
        self.stdout.write('creating Helsinki audiences keyword set...')
        # create the set itself
        keyword_set, created = KeywordSet.objects.update_or_create(
            id=HELSINKI_KEYWORD_SET_DATA['id'],
            defaults=HELSINKI_KEYWORD_SET_DATA
        )
        if created:
            self.stdout.write('created keyword set "%s"' % HELSINKI_KEYWORD_SET_DATA['id'])
        else:
            self.stdout.write('keyword set "%s" already exist' % HELSINKI_KEYWORD_SET_DATA['id'])
        # flatten YSO keyword IDs
        yso_keyword_ids = [val for sublist in KEYWORD_MAPPING.values() for val in sublist]
        # keywords to add to the set = YSO keywords corresponding to hel.fi + YSO SOTE keywords + new SOTE keywords
        keyword_ids = yso_keyword_ids + YSO_SOTE_KEYWORD_IDS + [kw['id'] for kw in NEW_SOTE_KEYWORDS_DATA]
        # add the keywords to the set
        existing_keywords = set(keyword_set.keywords.all())
        for keyword_id in keyword_ids:
            keyword = self.get_keyword_obj(keyword_id)
            if keyword not in existing_keywords:
                keyword_set.keywords.add(keyword)
                existing_keywords.add(keyword)
                self.stdout.write('added %s (%s) to the keyword set' % (keyword.name, keyword_id))

    @transaction.atomic()
    def add_yso_audience_keywords_to_events(self):
        """Add the YSO equivalents of each event's hel.fi audience keywords."""
        self.stdout.write('adding YSO audience keywords to events...')
        for event in Event.objects.exclude(audience__isnull=True).prefetch_related('audience'):
            # Track membership in a set: re-checking `in event.audience.all()`
            # after .add() consults the stale prefetch cache, so the same YSO
            # keyword mapped from two hel.fi audiences could be added (and
            # logged) twice.
            existing = set(event.audience.all())
            for audience in event.audience.all():
                # if current audience is a valid hel.fi audience keyword, iterate YSO keywords corresponding to it
                for yso_keyword_id in KEYWORD_MAPPING.get(audience.id, []):
                    yso_keyword_obj = self.get_keyword_obj(yso_keyword_id)
                    if yso_keyword_obj not in existing:
                        event.audience.add(yso_keyword_obj)
                        existing.add(yso_keyword_obj)
                        self.stdout.write('added %s (%s) to %s' % (yso_keyword_obj, yso_keyword_id, event))

    def handle(self, *args, **options):
        """Entry point: run all three steps in order."""
        self.create_sote_keywords()
        self.create_helsinki_audiences_keyword_set()
        self.add_yso_audience_keywords_to_events()
        self.stdout.write('all done')
| {
"repo_name": "tuomas777/linkedevents",
"path": "events/management/commands/add_helsinki_audience.py",
"copies": "2",
"size": "4899",
"license": "bsd-3-clause",
"hash": -4053264242396634000,
"line_mean": 39.0655737705,
"line_max": 117,
"alpha_frac": 0.6123158756,
"autogenerated": false,
"ratio": 3.2893674293405115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4901683304940512,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from events.models import Keyword, KeywordSet, DataSource
# Definition of the "www.hel.fi themes" KeywordSet created by this command.
HELFI_KEYWORD_SET_DATA = {
    'id': 'helfi:topics',
    'name_en': 'www.hel.fi themes',
    'name_fi': 'www.hel.fi-aihepiirit',
    'name_sv': 'www.hel.fi-teman',
    'data_source_id': 'helfi',
    'usage': KeywordSet.KEYWORD,
}
# Topic keywords created locally in the 'helfi' data source (ids helfi:8-13).
NEW_HELFI_KEYWORDS_DATA = [
    {
        'id': 'helfi:8',
        'name_fi': 'Kaupunki ja hallinto',
        'name_sv': 'Staden och förvaltning',
        'name_en': 'City administration',
        'data_source_id': 'helfi',
    },
    {
        'id': 'helfi:9',
        'name_fi': 'Sosiaali- ja terveyspalvelut',
        'name_sv': 'Social- och hälsovård',
        'name_en': 'Social services and health care',
        'data_source_id': 'helfi',
    },
    {
        'id': 'helfi:10',
        'name_fi': 'Liikenne ja kartat',
        'name_sv': 'Kartor och trafik',
        'name_en': 'Maps and transport',
        'data_source_id': 'helfi',
    },
    {
        'id': 'helfi:11',
        'name_fi': 'Päivähoito ja koulutus',
        'name_sv': 'Dagvård och utbildning',
        'name_en': 'Daycare and education',
        'data_source_id': 'helfi',
    },
    {
        'id': 'helfi:12',
        'name_fi': 'Kulttuuri ja vapaa-aika',
        'name_sv': 'Kultur och fritid',
        'name_en': 'Culture and leisure',
        'data_source_id': 'helfi',
    },
    {
        'id': 'helfi:13',
        'name_fi': 'Asuminen ja ympäristö',
        'name_sv': 'Boende och miljö',
        'name_en': 'Housing and environment',
        'data_source_id': 'helfi',
    },
]
class Command(BaseCommand):
    help = "Creates www.hel.fi topic keywords and keyword set used by the UI."

    def get_keyword_obj(self, keyword_id):
        """Return the ``Keyword`` with *keyword_id*, memoized per command run.

        Raises:
            CommandError: if no keyword with that id exists.
        """
        # Memoize on the instance instead of functools.lru_cache: lru_cache
        # on a method keys the cache on `self` and keeps the instance alive
        # for the cache's lifetime (ruff B019).
        cache = self.__dict__.setdefault('_keyword_cache', {})
        if keyword_id not in cache:
            try:
                cache[keyword_id] = Keyword.objects.get(id=keyword_id)
            except Keyword.DoesNotExist:
                raise CommandError('keyword "%s" does not exist' % keyword_id)
        return cache[keyword_id]

    @transaction.atomic()
    def create_helfi_keywords(self):
        """Create (or update) the hel.fi topic keywords from NEW_HELFI_KEYWORDS_DATA."""
        self.stdout.write('creating new helfi keywords...')
        for new_keyword_data in NEW_HELFI_KEYWORDS_DATA:
            keyword, created = Keyword.objects.update_or_create(
                id=new_keyword_data['id'],
                defaults=new_keyword_data
            )
            if created:
                self.stdout.write('created keyword %s (%s)' % (new_keyword_data['name_fi'], new_keyword_data['id']))
            else:
                self.stdout.write('keyword %s (%s) already exists' % (new_keyword_data['name_fi'],
                                                                      new_keyword_data['id']))

    @transaction.atomic()
    def create_helfi_topics_keyword_set(self):
        """Create the www.hel.fi topics KeywordSet and attach the topic keywords."""
        self.stdout.write('creating www.hel.fi topics keyword set...')
        # create the set itself
        keyword_set, created = KeywordSet.objects.update_or_create(
            id=HELFI_KEYWORD_SET_DATA['id'],
            defaults=HELFI_KEYWORD_SET_DATA
        )
        if created:
            self.stdout.write('created keyword set "%s"' % HELFI_KEYWORD_SET_DATA['id'])
        else:
            self.stdout.write('keyword set "%s" already exist' % HELFI_KEYWORD_SET_DATA['id'])
        keyword_ids = [kw['id'] for kw in NEW_HELFI_KEYWORDS_DATA]
        # add the keywords to the set
        existing_keywords = set(keyword_set.keywords.all())
        for keyword_id in keyword_ids:
            keyword = self.get_keyword_obj(keyword_id)
            if keyword not in existing_keywords:
                keyword_set.keywords.add(keyword)
                existing_keywords.add(keyword)
                self.stdout.write('added %s (%s) to the keyword set' % (keyword.name, keyword_id))

    def handle(self, *args, **options):
        """Entry point: ensure the helfi data source exists, then create keywords and the set."""
        # Helfi data source must be created if missing.
        DataSource.objects.get_or_create(id=HELFI_KEYWORD_SET_DATA['data_source_id'])
        self.create_helfi_keywords()
        self.create_helfi_topics_keyword_set()
        self.stdout.write('all done')
| {
"repo_name": "City-of-Helsinki/linkedevents",
"path": "events/management/commands/add_helfi_topics.py",
"copies": "1",
"size": "4146",
"license": "mit",
"hash": 2507051638608980500,
"line_mean": 33.475,
"line_max": 116,
"alpha_frac": 0.5678027556,
"autogenerated": false,
"ratio": 3.3606823720552397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44284851276552395,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from events.models import Keyword, KeywordSet, DataSource
# Definition of the "Helsinki topics" KeywordSet created by this command.
HELSINKI_KEYWORD_SET_DATA = {
    'id': 'helsinki:topics',
    'name_en': 'Helsinki topics',
    'name_fi': 'Helsinki-aihepiirit',
    'name_sv': 'Helsingfors-teman',
    'data_source_id': 'helsinki',
    'usage': KeywordSet.KEYWORD,
}
# Pre-existing YSO keywords that make up the Helsinki topics set.
HELSINKI_KEYWORD_IDS = [
    'yso:p1235',  # elokuvat
    'yso:p1947',  # hyvinvointi
    'yso:p14004',  # keskustelu
    'yso:p11185',  # konsertit
    'yso:p360',  # kulttuuritapahtumat
    'yso:p2739',  # kuvataide
    'yso:p316',  # leikkiminen
    'yso:p916',  # liikunta
    'yso:p15875',  # luennot
    'yso:p1808',  # musiikki
    'yso:p5121',  # näyttelyt
    'yso:p2149',  # opastus
    'yso:p10727',  # osallistuminen
    'yso:p6062',  # pelit
    'yso:p3670',  # ruoka
    'yso:p1278',  # tanssi
    'yso:p2625',  # teatteritaide
    'yso:p19245',  # työpajat
    'yso:p2771',  # ulkoilu
    'yso:p965'  # urheilu
]
class Command(BaseCommand):
    help = "Creates Helsinki topics keyword set."

    def get_keyword_obj(self, keyword_id):
        """Return the ``Keyword`` with *keyword_id*, memoized per command run.

        Raises:
            CommandError: if no keyword with that id exists.
        """
        # Memoize on the instance instead of functools.lru_cache: lru_cache
        # on a method keys the cache on `self` and keeps the instance alive
        # for the cache's lifetime (ruff B019).
        cache = self.__dict__.setdefault('_keyword_cache', {})
        if keyword_id not in cache:
            try:
                cache[keyword_id] = Keyword.objects.get(id=keyword_id)
            except Keyword.DoesNotExist:
                raise CommandError('keyword "%s" does not exist' % keyword_id)
        return cache[keyword_id]

    @transaction.atomic()
    def create_helsinki_topics_keyword_set(self):
        """Create the Helsinki topics KeywordSet and attach the topic keywords."""
        self.stdout.write('creating Helsinki topics keyword set...')
        # create the set itself
        keyword_set, created = KeywordSet.objects.update_or_create(
            id=HELSINKI_KEYWORD_SET_DATA['id'],
            defaults=HELSINKI_KEYWORD_SET_DATA
        )
        if created:
            self.stdout.write('created keyword set "%s"' % HELSINKI_KEYWORD_SET_DATA['id'])
        else:
            self.stdout.write('keyword set "%s" already exist' % HELSINKI_KEYWORD_SET_DATA['id'])
        # add the keywords to the set
        existing_keywords = set(keyword_set.keywords.all())
        for keyword_id in HELSINKI_KEYWORD_IDS:
            keyword = self.get_keyword_obj(keyword_id)
            if keyword not in existing_keywords:
                keyword_set.keywords.add(keyword)
                existing_keywords.add(keyword)
                self.stdout.write('added %s (%s) to the keyword set' % (keyword.name, keyword_id))

    def handle(self, *args, **options):
        """Entry point: ensure the Helsinki data source exists, then create the set."""
        # Helsinki data source must be created if missing. Note that it is not necessarily the system data source.
        # If we are creating it, it *may* still be the system data source, so it must be user editable!
        helsinki_data_source_defaults = {'user_editable': True}
        DataSource.objects.get_or_create(id=HELSINKI_KEYWORD_SET_DATA['data_source_id'],
                                         defaults=helsinki_data_source_defaults)
        self.create_helsinki_topics_keyword_set()
        self.stdout.write('all done')
| {
"repo_name": "City-of-Helsinki/linkedevents",
"path": "events/management/commands/add_helsinki_topics.py",
"copies": "1",
"size": "3012",
"license": "mit",
"hash": 6162979680510005000,
"line_mean": 35.265060241,
"line_max": 114,
"alpha_frac": 0.6255813953,
"autogenerated": false,
"ratio": 3.1321540062434963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.925333375528995,
"avg_score": 0.0008803292507093684,
"num_lines": 83
} |
from functools import lru_cache
from django import http
from django.conf import settings
from catalog import typing
from catalog.context.base import Products, Tags
from images.models import ImageQuerySet
from refarm_pagination.context import PaginationContext
class ProductBrands(Products):
    """Context fragment mapping product id -> brand tag."""

    def __init__(self, products: typing.Products, tags: Tags):
        super().__init__(products)
        self._tags = tags

    def context(self):
        listed = list(self._products)
        brand_map = self._tags.qs().get_brands(listed)
        return {
            'product_brands': {item.id: brand_map.get(item) for item in listed},
        }
class ProductImages(Products):
    """Context fragment mapping product id -> main page image."""

    def __init__(self, products: typing.Products, images: ImageQuerySet):
        super().__init__(products)
        self._images = images

    def context(self):
        # Index products by their page so main images (keyed by page) can be
        # mapped back to product ids.
        by_page = {item.page: item for item in self.products}
        main_images = self._images.get_main_images_by_pages(by_page.keys())
        return {
            'product_images': {
                item.id: main_images.get(page)
                for page, item in by_page.items()
            },
        }
class PaginatedProducts(Products):
    """Slice products and add pagination data to a context."""

    def __init__(
        self, products: typing.Products,
        url: str, page_number: int, per_page: int
    ):
        # Reject page numbers below one and page sizes the category UI
        # does not offer.
        if (
            page_number < 1 or
            per_page not in settings.CATEGORY_STEP_MULTIPLIERS
        ):
            raise http.Http404('Page does not exist.')
        super().__init__(products)
        self._pagination = PaginationContext(url, page_number, per_page, products)
        # Lazily computed result of self._pagination.context().
        self._pagination_cache = None

    def _pagination_context(self):
        """Build the pagination context once and reuse it afterwards."""
        # Memoized on the instance instead of functools.lru_cache: lru_cache
        # on a method keys the cache on `self` and keeps every instance
        # alive for the cache's lifetime (ruff B019).
        if self._pagination_cache is None:
            self._pagination_cache = self._pagination.context()
        return self._pagination_cache

    @property
    def products(self):
        # Only the products that belong to the requested page.
        return self._pagination_context()['page'].object_list

    def context(self):
        return {
            **super().context(),
            'paginated': self._pagination_context(),
        }
| {
"repo_name": "fidals/refarm-site",
"path": "catalog/context/products.py",
"copies": "1",
"size": "2173",
"license": "mit",
"hash": 2764574087406159400,
"line_mean": 25.5,
"line_max": 82,
"alpha_frac": 0.5996318454,
"autogenerated": false,
"ratio": 4.294466403162056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5394098248562056,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django import template
from django.conf import settings
from django.forms.formsets import BaseFormSet
from django.template.loader import get_template
from crispy_forms.helper import FormHelper
from crispy_forms.utils import TEMPLATE_PACK, get_template_pack
register = template.Library()
# We import the filters, so they are available when doing load crispy_forms_tags
from crispy_forms.templatetags.crispy_forms_filters import * # NOQA: F403,F401, E402 isort:skip
class ForLoopSimulator:
    """
    Simulates a forloop tag, precisely::

        {% for form in formset.forms %}

    If `{% crispy %}` is rendering a formset with a helper, We inject a `ForLoopSimulator`
    object in the context as `forloop` so that formset forms can do things like::

        Fieldset("Item {{ forloop.counter }}", [...])
        HTML("{% if forloop.first %}First form text{% endif %}"
    """

    def __init__(self, formset):
        self.len_values = len(formset.forms)
        # Shortcuts for current loop iteration number.
        self.counter = 1
        self.counter0 = 0
        # Reverse counter iteration numbers.
        self.revcounter = self.len_values
        self.revcounter0 = self.len_values - 1
        # Boolean values designating first and last times through loop.
        self.first = True
        self.last = 0 == self.len_values - 1

    def iterate(self):
        """
        Updates values as if we had iterated over the for
        """
        self.counter += 1
        self.counter0 += 1
        self.revcounter -= 1
        self.revcounter0 -= 1
        self.first = False
        # BUGFIX: `last` must be True on the final iteration, i.e. when
        # revcounter0 reaches 0 (matching the __init__ condition
        # `0 == len_values - 1`). The previous comparison against
        # `len_values - 1` could never be True after iterating, so
        # `{% if forloop.last %}` never fired inside formset layouts.
        self.last = 0 == self.revcounter0
class BasicNode(template.Node):
    """
    Basic Node object that we can rely on for Node objects in normal
    template tags. I created this because most of the tags we'll be using
    will need both the form object and the helper string. This handles
    both the form object and parses out the helper string into attributes
    that templates can easily handle.
    """
    def __init__(self, form, helper, template_pack=None):
        # `form` and `helper` are template *variable names* (strings); they
        # are resolved against the context later, in get_render().
        self.form = form
        if helper is not None:
            self.helper = helper
        else:
            self.helper = None
        self.template_pack = template_pack or get_template_pack()
    def get_render(self, context):
        """
        Returns a `Context` object with all the necessary stuff for rendering the form
        :param context: `django.template.Context` variable holding the context for the node
        `self.form` and `self.helper` are resolved into real Python objects resolving them
        from the `context`. The `actual_form` can be a form or a formset. If it's a formset
        `is_formset` is set to True. If the helper has a layout we use it, for rendering the
        form or the formset's forms.
        """
        # Nodes are not thread safe in multithreaded environments
        # https://docs.djangoproject.com/en/dev/howto/custom-template-tags/#thread-safety-considerations
        if self not in context.render_context:
            context.render_context[self] = (
                template.Variable(self.form),
                template.Variable(self.helper) if self.helper else None,
            )
        form, helper = context.render_context[self]
        actual_form = form.resolve(context)
        if self.helper is not None:
            helper = helper.resolve(context)
        else:
            # If the user names the helper within the form `helper` (standard), we use it
            # This allows us to have simplified tag syntax: {% crispy form %}
            helper = FormHelper() if not hasattr(actual_form, "helper") else actual_form.helper
        # use template_pack from helper, if defined
        try:
            if helper.template_pack:
                self.template_pack = helper.template_pack
        except AttributeError:
            pass
        # Kept for subclasses (e.g. CrispyFormNode.render reads it to pick a
        # helper-supplied template).
        self.actual_helper = helper
        # We get the response dictionary
        is_formset = isinstance(actual_form, BaseFormSet)
        response_dict = self.get_response_dict(helper, context, is_formset)
        # Work on copies so rendering does not leak variables into the
        # caller's context.
        node_context = context.__copy__()
        node_context.update({"is_bound": actual_form.is_bound})
        node_context.update(response_dict)
        final_context = node_context.__copy__()
        # If we have a helper's layout we use it, for the form or the formset's forms
        if helper and helper.layout:
            if not is_formset:
                actual_form.form_html = helper.render_layout(
                    actual_form, node_context, template_pack=self.template_pack
                )
            else:
                # Each member form is rendered with a simulated {% for %}
                # loop exposed to the layout as `forloop`.
                forloop = ForLoopSimulator(actual_form)
                helper.render_hidden_fields = True
                for form in actual_form:
                    node_context.update({"forloop": forloop})
                    node_context.update({"formset_form": form})
                    form.form_html = helper.render_layout(form, node_context, template_pack=self.template_pack)
                    forloop.iterate()
        if is_formset:
            final_context["formset"] = actual_form
        else:
            final_context["form"] = actual_form
        return final_context
    def get_response_dict(self, helper, context, is_formset):
        """
        Returns a dictionary with all the parameters necessary to render the form/formset in a template.
        :param context: `django.template.Context` for the node
        :param is_formset: Boolean value. If set to True, indicates we are working with a formset.
        """
        if not isinstance(helper, FormHelper):
            raise TypeError("helper object provided to {% crispy %} tag must be a crispy.helper.FormHelper object.")
        attrs = helper.get_attributes(template_pack=self.template_pack)
        form_type = "form"
        if is_formset:
            form_type = "formset"
        # We take form/formset parameters from attrs if they are set, otherwise we use defaults
        response_dict = {
            "%s_action" % form_type: attrs["attrs"].get("action", ""),
            "%s_attrs" % form_type: attrs.get("attrs", ""),
            "%s_class" % form_type: attrs["attrs"].get("class", ""),
            "%s_id" % form_type: attrs["attrs"].get("id", ""),
            "%s_method" % form_type: attrs.get("form_method", "post"),
            "%s_style" % form_type: attrs.get("form_style", None),
            "%s_tag" % form_type: attrs.get("form_tag", True),
            "disable_csrf": attrs.get("disable_csrf", False),
            "error_text_inline": attrs.get("error_text_inline", True),
            "field_class": attrs.get("field_class", ""),
            "field_template": attrs.get("field_template", ""),
            "flat_attrs": attrs.get("flat_attrs", ""),
            "form_error_title": attrs.get("form_error_title", None),
            "form_show_errors": attrs.get("form_show_errors", True),
            "form_show_labels": attrs.get("form_show_labels", True),
            "formset_error_title": attrs.get("formset_error_title", None),
            "help_text_inline": attrs.get("help_text_inline", False),
            "html5_required": attrs.get("html5_required", False),
            "include_media": attrs.get("include_media", True),
            "inputs": attrs.get("inputs", []),
            "is_formset": is_formset,
            "label_class": attrs.get("label_class", ""),
            "template_pack": self.template_pack,
        }
        # Handles custom attributes added to helpers
        for attribute_name, value in attrs.items():
            if attribute_name not in response_dict:
                response_dict[attribute_name] = value
        if "csrf_token" in context:
            response_dict["csrf_token"] = context["csrf_token"]
        return response_dict
@lru_cache()
def whole_uni_formset_template(template_pack=TEMPLATE_PACK):
    """Load and memoize the whole-formset template of *template_pack*."""
    template_name = "%s/whole_uni_formset.html" % template_pack
    return get_template(template_name)
@lru_cache()
def whole_uni_form_template(template_pack=TEMPLATE_PACK):
    """Load and memoize the whole-form template of *template_pack*."""
    template_name = "%s/whole_uni_form.html" % template_pack
    return get_template(template_name)
class CrispyFormNode(BasicNode):
    """Template node rendered by the ``{% crispy %}`` tag."""

    def render(self, context):
        c = self.get_render(context).flatten()
        # Prefer a template set explicitly on the helper, else fall back to
        # the template-pack default for a form or a formset.
        # (Renamed the local from `template` to `tmpl`: the old name
        # shadowed the imported `django.template` module.)
        if self.actual_helper is not None and getattr(self.actual_helper, "template", False):
            tmpl = get_template(self.actual_helper.template)
        elif c["is_formset"]:
            tmpl = whole_uni_formset_template(self.template_pack)
        else:
            tmpl = whole_uni_form_template(self.template_pack)
        return tmpl.render(c)
# {% crispy %} tag
@register.tag(name="crispy")
def do_uni_form(parser, token):
    """
    You need to pass in at least the form/formset object, and can also pass in the
    optional `crispy_forms.helpers.FormHelper` object.
    helper (optional): A `crispy_forms.helper.FormHelper` object.
    Usage::
        {% load crispy_tags %}
        {% crispy form form.helper %}
    You can also provide the template pack as the third argument::
        {% crispy form form.helper 'bootstrap' %}
    If the `FormHelper` attribute is named `helper` you can simply do::
        {% crispy form %}
        {% crispy form 'bootstrap' %}
    """
    # token.split_contents() yields the raw tag tokens: ["crispy", form,
    # (helper,) (template_pack)]. Arguments are consumed positionally below.
    token = token.split_contents()
    form = token.pop(1)
    helper = None
    # Default template pack, quoted like a literal token so the unquoting
    # below works uniformly.
    template_pack = "'%s'" % get_template_pack()
    # {% crispy form helper %}
    try:
        helper = token.pop(1)
    except IndexError:
        pass
    # {% crispy form helper 'bootstrap' %}
    try:
        template_pack = token.pop(1)
    except IndexError:
        pass
    # {% crispy form 'bootstrap' %}
    # NOTE(review): `helper` is always a raw token string at this point, so
    # the isinstance check is effectively just a quote-detection test that
    # reclassifies a quoted second argument as the template pack.
    if helper is not None and isinstance(helper, str) and ("'" in helper or '"' in helper):
        template_pack = helper
        helper = None
    if template_pack is not None:
        # Strip the surrounding quote characters from the token.
        template_pack = template_pack[1:-1]
        ALLOWED_TEMPLATE_PACKS = getattr(
            settings, "CRISPY_ALLOWED_TEMPLATE_PACKS", ("bootstrap", "uni_form", "bootstrap3", "bootstrap4")
        )
        if template_pack not in ALLOWED_TEMPLATE_PACKS:
            raise template.TemplateSyntaxError(
                "crispy tag's template_pack argument should be in %s" % str(ALLOWED_TEMPLATE_PACKS)
            )
    return CrispyFormNode(form, helper, template_pack=template_pack)
| {
"repo_name": "django-crispy-forms/django-crispy-forms",
"path": "crispy_forms/templatetags/crispy_forms_tags.py",
"copies": "2",
"size": "10384",
"license": "mit",
"hash": -7153359252313214000,
"line_mean": 37.1764705882,
"line_max": 116,
"alpha_frac": 0.6126733436,
"autogenerated": false,
"ratio": 4.054666146036705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5667339489636705,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django import template
from django.conf import settings
from django.forms import boundfield
from django.forms.formsets import BaseFormSet
from django.template import Context
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from crispy_forms.exceptions import CrispyError
from crispy_forms.utils import TEMPLATE_PACK, flatatt
@lru_cache()
def uni_formset_template(template_pack=TEMPLATE_PACK):
    """Load and memoize the formset template of *template_pack*."""
    template_name = "%s/uni_formset.html" % template_pack
    return get_template(template_name)
@lru_cache()
def uni_form_template(template_pack=TEMPLATE_PACK):
    """Load and memoize the form template of *template_pack*."""
    template_name = "%s/uni_form.html" % template_pack
    return get_template(template_name)
register = template.Library()
@register.filter(name="crispy")
def as_crispy_form(form, template_pack=TEMPLATE_PACK, label_class="", field_class=""):
    """Render a form or formset the div-based django-crispy-forms way.

    Usage::

        {{ myform|crispy }}
        {{ myform|crispy:"bootstrap" }}

    In ``bootstrap3``/``bootstrap4`` horizontal forms, ``label_class`` and
    ``field_class`` set the column classes.
    """
    flat_context = Context(
        {
            "field_class": field_class,
            "field_template": "%s/field.html" % template_pack,
            "form_show_errors": True,
            "form_show_labels": True,
            "label_class": label_class,
        }
    ).flatten()
    if isinstance(form, BaseFormSet):
        selected_template = uni_formset_template(template_pack)
        flat_context["formset"] = form
    else:
        selected_template = uni_form_template(template_pack)
        flat_context["form"] = form
    return selected_template.render(flat_context)
@register.filter(name="as_crispy_errors")
def as_crispy_errors(form, template_pack=TEMPLATE_PACK):
    """Render only the form's (or formset's) errors in crispy style.

    Usage::

        {{ form|as_crispy_errors }}
        {{ form|as_crispy_errors:"bootstrap" }}
    """
    if isinstance(form, BaseFormSet):
        template_name = "%s/errors_formset.html" % template_pack
        flat_context = Context({"formset": form}).flatten()
    else:
        template_name = "%s/errors.html" % template_pack
        flat_context = Context({"form": form}).flatten()
    return get_template(template_name).render(flat_context)
@register.filter(name="as_crispy_field")
def as_crispy_field(field, template_pack=TEMPLATE_PACK, label_class="", field_class=""):
    """Render a single bound field like a django-crispy-forms field.

    Usage::

        {{ form.field|as_crispy_field }}
        {{ form.field|as_crispy_field:"bootstrap" }}
    """
    # Keep the isinstance check first so settings is only touched for
    # invalid values.
    if not isinstance(field, boundfield.BoundField) and settings.DEBUG:
        raise CrispyError("|as_crispy_field got passed an invalid or inexistent field")
    context_data = {
        "field": field,
        "field_class": field_class,
        "form_show_errors": True,
        "form_show_labels": True,
        "label_class": label_class,
    }
    helper = getattr(field.form, "helper", None)
    template_path = None
    if helper is not None:
        context_data.update(helper.get_attributes(template_pack))
        template_path = helper.field_template
    field_template = get_template(template_path or "%s/field.html" % template_pack)
    return field_template.render(Context(context_data).flatten())
@register.filter(name="flatatt")
def flatatt_filter(attrs):
    """Flatten an attribute dict into a safe HTML attribute string."""
    return mark_safe(flatatt(attrs))
@register.filter
def optgroups(field):
    """
    A template filter easing the rendering of fields with optgroups.

    Returns the widget's (label, option, index) tuples where

    label: group label for grouped optgroups (`None` when the inputs are
        not grouped).
    option: dict with everything needed to render the option, e.g.::

            {
                "name": "checkbox_select_multiple",
                "value": 1,
                "label": 1,
                "selected": False,
                "index": "0",
                "attrs": {"id": "id_checkbox_select_multiple_0"},
                "type": "checkbox",
                "template_name": "django/forms/widgets/checkbox_option.html",
                "wrap_label": True,
            }

    index: group index
    """
    widget = field.field.widget
    html_id = widget.attrs.get("id") or field.auto_id
    attrs = field.build_widget_attrs({"id": html_id} if html_id else {})
    values = widget.format_value(field.value())
    return widget.optgroups(field.html_name, values, attrs)
| {
"repo_name": "maraujop/django-crispy-forms",
"path": "crispy_forms/templatetags/crispy_forms_filters.py",
"copies": "2",
"size": "4743",
"license": "mit",
"hash": -2199933348697774000,
"line_mean": 29.0189873418,
"line_max": 88,
"alpha_frac": 0.6209150327,
"autogenerated": false,
"ratio": 3.767275615567911,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5388190648267911,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from django.utils.crypto import salted_hmac, constant_time_compare
from django.urls import reverse
from django.conf import settings
from crossdomainmedia import CrossDomainMediaAuth
from froide.helper.auth import (
can_read_object, can_write_object,
can_manage_object, has_authenticated_access,
)
from .models import FoiRequest
@lru_cache()
def can_read_foirequest(foirequest, request, allow_code=True):
    """Whether the request may read the FOI request (cached per arguments)."""
    if foirequest.visibility == FoiRequest.INVISIBLE:
        return False
    if can_read_object(foirequest, request):
        return True
    # Optionally fall back to an auth code carried in the session.
    return can_read_foirequest_anonymous(foirequest, request) if allow_code else False
@lru_cache()
def can_read_foirequest_authenticated(foirequest, request, allow_code=True):
    """Read-access check for authenticated users, optionally via auth code."""
    if has_authenticated_access(foirequest, request, verb='read',
                                scope='read:request'):
        return True
    user = request.user
    if user.is_staff and user.has_perm('foirequest.see_private'):
        return True
    if foirequest.project:
        return has_authenticated_access(
            foirequest.project, request, verb='read',
            scope='read:request'
        )
    # if authenticated may still have code
    if allow_code:
        return can_read_foirequest_anonymous(foirequest, request)
    return False
def can_read_foiproject(foiproject, request):
    """Read-access check for a FOI project."""
    return can_read_object(foiproject, request)


@lru_cache()
def can_write_foirequest(foirequest, request):
    """Write-access check for a FOI request, delegating to its project."""
    if can_write_object(foirequest, request):
        return True
    project = foirequest.project
    return can_write_foiproject(project, request) if project else False


def can_manage_foirequest(foirequest, request):
    """Manage-access check for a FOI request."""
    return can_manage_object(foirequest, request)


def can_write_foiproject(foiproject, request):
    """Write-access check for a FOI project."""
    return can_write_object(foiproject, request)


def can_manage_foiproject(foiproject, request):
    """Manage-access check for a FOI project."""
    return can_manage_object(foiproject, request)
def can_read_foirequest_anonymous(foirequest, request):
    """Grant read access when the session carries a valid auth code."""
    code = request.session.get('pb_auth')
    if code is None:
        return False
    return check_foirequest_auth_code(foirequest, code)
def get_foirequest_auth_code(foirequest):
    """HMAC code granting anonymous read access to a FOI request."""
    value = '%s#%s' % (foirequest.id, foirequest.secret_address)
    return salted_hmac("FoiRequestPublicBodyAuth", value).hexdigest()


def get_foirequest_upload_code(foirequest):
    """HMAC code granting upload access to a FOI request."""
    value = '%s#%s' % (foirequest.id, foirequest.secret_address)
    return salted_hmac("FoiRequestPublicBodyUpload", value).hexdigest()


def check_foirequest_auth_code(foirequest, code):
    """Timing-safe comparison of a read auth code."""
    return constant_time_compare(code, get_foirequest_auth_code(foirequest))


def check_foirequest_upload_code(foirequest, code):
    """Timing-safe comparison of an upload auth code."""
    return constant_time_compare(code, get_foirequest_upload_code(foirequest))
def is_attachment_public(foirequest, attachment):
    # NOTE(review): can_read_object is called with one argument here, unlike
    # the two-argument (obj, request) calls above — presumably it defaults to
    # an anonymous/world check; confirm against its signature.
    return can_read_object(foirequest) and attachment.approved


def clear_lru_caches():
    # Reset the module-level lru_cache-backed permission checks.
    for f in (can_write_foirequest, can_read_foirequest,
              can_read_foirequest_authenticated):
        f.cache_clear()
def has_attachment_access(request, foirequest, attachment):
    """Whether the request may read the given attachment."""
    if not can_read_foirequest(foirequest, request):
        return False
    if attachment.approved:
        return True
    # Unapproved attachments require authenticated access (no auth codes).
    return can_read_foirequest_authenticated(
        foirequest, request, allow_code=False
    )
def get_accessible_attachment_url(foirequest, attachment):
    """URL for the attachment, authorized unless it is publicly readable."""
    is_public = is_attachment_public(foirequest, attachment)
    return attachment.get_absolute_domain_file_url(authorized=not is_public)
class AttachmentCrossDomainMediaAuth(CrossDomainMediaAuth):
    '''
    Cross-domain media auth for FOI attachments, implementing the
    minimal CrossDomainMediaAuth interface.
    '''
    TOKEN_MAX_AGE_SECONDS = settings.FOI_MEDIA_TOKEN_EXPIRY
    SITE_URL = settings.SITE_URL
    DEBUG = False

    def is_media_public(self):
        '''
        True when the media described by self.context
        needs no authentication/authorization at all.
        '''
        context = self.context
        return is_attachment_public(context['foirequest'], context['object'])

    def has_perm(self, request):
        context = self.context
        return has_attachment_access(
            request, context['foirequest'], context['object']
        )

    def get_auth_url(self):
        '''
        URL path to the authenticating view
        for the media described in context.
        '''
        attachment = self.context['object']
        return reverse('foirequest-auth_message_attachment',
                       kwargs={
                           'message_id': attachment.belongs_to_id,
                           'attachment_name': attachment.name
                       })

    def get_full_auth_url(self):
        return super().get_full_auth_url() + '?download'

    def get_media_file_path(self):
        '''
        URL path relative to MEDIA_ROOT for debug mode.
        '''
        return self.context['object'].file.name
| {
"repo_name": "stefanw/froide",
"path": "froide/foirequest/auth.py",
"copies": "1",
"size": "5149",
"license": "mit",
"hash": 2059938595774656000,
"line_mean": 27.9269662921,
"line_max": 78,
"alpha_frac": 0.6791610021,
"autogenerated": false,
"ratio": 3.7474526928675402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.492661369496754,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from fastapi import Depends
from opentrons.api import MainRouter
from opentrons.hardware_control import ThreadManager, ThreadedAsyncLock
from robot_server.service.session.manager import SessionManager
from robot_server.service.legacy.rpc import RPCServer
from . import HARDWARE_APP_KEY
# Module-level singletons, lazily created by the getter functions below.
# The single instance of the RPCServer
_rpc_server_instance = None
# The single instance of the SessionManager
_session_manager_inst = None
async def get_hardware() -> ThreadManager:
    """Hardware dependency"""
    # todo Amit 2/11/2020. This function should create and return a singleton
    # hardware interface.
    from .app import app

    return app.extra[HARDWARE_APP_KEY]  # type: ignore
@lru_cache(maxsize=1)
def get_motion_lock() -> ThreadedAsyncLock:
    """
    Return the single, process-wide motion lock (created on first use).

    :return: a threaded async lock
    """
    return ThreadedAsyncLock()
async def get_rpc_server() -> RPCServer:
    """The RPC Server instance"""
    global _rpc_server_instance
    if not _rpc_server_instance:
        hardware = await get_hardware()
        router = MainRouter(hardware, lock=get_motion_lock())
        _rpc_server_instance = RPCServer(None, router)
    return _rpc_server_instance
def get_session_manager(
        hardware: ThreadManager = Depends(get_hardware)) -> SessionManager:
    """Return the single, lazily created session manager instance."""
    global _session_manager_inst
    if not _session_manager_inst:
        _session_manager_inst = SessionManager(hardware=hardware)
    return _session_manager_inst
| {
"repo_name": "OpenTrons/opentrons_sdk",
"path": "robot-server/robot_server/service/dependencies.py",
"copies": "1",
"size": "1535",
"license": "apache-2.0",
"hash": -2461046836795708000,
"line_mean": 27.9622641509,
"line_max": 77,
"alpha_frac": 0.7185667752,
"autogenerated": false,
"ratio": 3.8375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50560667752,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from gfxutils import Color, V2
from . import core
from .image import Image, Sprite
class Font:
    """SDL font wrapper that renders text into cached Images."""

    def __init__(self, renderer, filename, size=24):
        self.renderer = renderer
        self.font = core.open_font(filename.encode('utf-8'), size)
        # Cache rendered text per instance.  A class-level @lru_cache on
        # render() would key on `self` and keep every Font (and its SDL
        # textures) alive for the cache's lifetime (flake8-bugbear B019).
        self.render = lru_cache()(self.render)

    def render(self, text, color=Color(1, 1, 1)):
        """Render *text* in *color* and return an Image (cached per call args)."""
        c = core.Color(int(color.red * 255),
                       int(color.green * 255),
                       int(color.blue * 255),
                       int(color.alpha * 255))
        frames = 1
        renderer = self.renderer
        surface = core.render_text(self.font, text.encode('utf-8'), c)
        size = V2(surface.contents.w, surface.contents.h)
        texture = core.create_texture_from_surface(renderer, surface)
        # The surface is only needed to build the texture.
        core.free_surface(surface)
        return Image(renderer, texture, size, frames)

    def write(self, text, position, effect=None, centered=True):
        """Render *text* and draw it immediately at *position*."""
        image = self.render(text)
        sprite = Sprite(image, position, effect=effect, centered=centered)
        sprite.draw()
| {
"repo_name": "Steven-Wilson/pyxlgame",
"path": "font.py",
"copies": "1",
"size": "1075",
"license": "mit",
"hash": -9098668530189352000,
"line_mean": 32.59375,
"line_max": 74,
"alpha_frac": 0.6046511628,
"autogenerated": false,
"ratio": 3.7587412587412588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4863392421541259,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from gfxutils import V2, Color, Rect
from . import core
# NOTE(review): the draw methods below consult core.DRAW_BORDERS, not this
# module-level flag — confirm which one is intended.
DRAW_BORDERS = False
class Image:
    'Stores image data -- Cannot be drawn directly'

    def __init__(self, renderer, texture, size, frames):
        self.renderer = renderer
        self.texture = texture
        self.size = size
        # Number of horizontal animation frames packed into the texture.
        self.frames = frames

    def __del__(self):
        # Free the SDL texture when the Image is garbage collected.
        core.destroy_texture(self.texture)

    @property
    def color(self):
        # Read back the texture's color/alpha modulation as a 0..1 Color.
        r = core.c_uint8(0)
        g = core.c_uint8(0)
        b = core.c_uint8(0)
        a = core.c_uint8(0)
        core.get_texture_color_mod(self.texture, core.byref(r), core.byref(g), core.byref(b))
        core.get_texture_alpha_mod(self.texture, core.byref(a))
        return Color(r.value / 255,
                     g.value / 255,
                     b.value / 255,
                     a.value / 255)

    @color.setter
    def color(self, other):
        # NOTE(review): 'set_textute_color_mod' looks like a typo for
        # 'set_texture_color_mod' — confirm against the core module before
        # renaming, since this calls whatever name core actually exposes.
        core.set_textute_color_mod(self.texture,
                                   int(other.red * 255),
                                   int(other.green * 255),
                                   int(other.blue * 255))
        core.set_texture_alpha_mod(self.texture, int(other.alpha * 255))

    @property
    def blend_mode(self):
        # Write-only in practice: the getter is not implemented.
        pass

    @blend_mode.setter
    def blend_mode(self, other):
        core.set_texture_blend_mode(self.texture, other)
class Sprite:
    'Wraps an Image with additional attributes needed to draw'

    def __init__(self, image, position, effect=None, centered=True):
        self.image = image
        self.position = position
        self.current_frame = 0
        self.animation_progress = 0
        # Width of one frame within the sprite sheet.
        self.size = V2(image.size.x / self.image.frames, image.size.y)
        self.centered = centered
        self.effect = effect

    def update(self, milliseconds):
        if self.effect is not None:
            self.effect.update(milliseconds)
        self.animation_progress += 1
        # Advance one frame every 7 updates, wrapping around.
        if self.animation_progress > 6:
            self.current_frame = (self.current_frame + 1) % self.image.frames
            self.animation_progress = 0

    @property
    def clip_rect(self):
        # Source rectangle selecting the current frame in the sheet.
        frame_x = int(self.current_frame * self.size.x)
        return core.Rect(frame_x, 0, int(self.size.x), int(self.size.y))

    @property
    def target_rect(self):
        # Destination rectangle, optionally centered on the position.
        x, y = self.position.x, self.position.y
        if self.centered:
            x -= self.size.x / 2
            y -= self.size.y / 2
        return core.Rect(int(x), int(y), int(self.size.x), int(self.size.y))

    def draw_normal(self):
        renderer = self.image.renderer
        core.render_copy(renderer,
                         self.image.texture,
                         core.byref(self.clip_rect),
                         core.byref(self.target_rect))
        if core.DRAW_BORDERS:
            core.set_render_draw_color(renderer, 0, 255, 0, 255)
            core.render_draw_rect(renderer, core.byref(self.target_rect))

    def draw_extended(self, degrees, center, flipped):
        renderer = self.image.renderer
        core.render_copy_ex(renderer,
                            self.image.texture,
                            core.byref(self.clip_rect),
                            core.byref(self.target_rect),
                            int(degrees),
                            core.Point(int(center.x), int(center.y)),
                            int(flipped))
        if core.DRAW_BORDERS:
            core.set_render_draw_color(renderer, 0, 255, 0, 255)
            core.render_draw_rect(renderer, core.byref(self.target_rect))

    def draw(self):
        # Effects take over drawing entirely when present.
        if self.effect is not None:
            self.effect.apply(self)
        else:
            self.draw_normal()

    def contains(self, point):
        r = self.target_rect
        return (r.x <= point.x < r.x + r.w) and (r.y <= point.y < r.y + r.h)

    @property
    def color(self):
        return self.image.color

    @color.setter
    def color(self, other):
        self.image.color = other

    @property
    def blend_mode(self):
        pass

    @blend_mode.setter
    def blend_mode(self, other):
        self.image.blend_mode = other
| {
"repo_name": "Steven-Wilson/pyxlgame",
"path": "image.py",
"copies": "1",
"size": "4574",
"license": "mit",
"hash": 7121355629803069000,
"line_mean": 31.2112676056,
"line_max": 93,
"alpha_frac": 0.5172715348,
"autogenerated": false,
"ratio": 3.859915611814346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48771871466143457,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from hwt.hdl.operatorDefs import AllOps
from hwtHls.allocator.allocator import HlsAllocator
from math import log2
from hwtHls.platform.opRealizationMeta import OpRealizationMeta
from hwtHls.scheduler.force_directed import ForceDirectedScheduler
from hwtHls.scheduler.list_schedueling import ListSchedueler
from hwtHls.scheduler.scheduler import HlsScheduler
from hwt.synthesizer.dummyPlatform import DummyPlatform
# Operators whose delay grows fastest with bit width.
_OPS_T_GROWING_EXP = {
    AllOps.DIV,
    AllOps.POW,
    AllOps.MUL,
    AllOps.MOD,
}

# Operators whose delay grows moderately with bit width.
_OPS_T_GROWING_LIN = {
    AllOps.ADD,
    AllOps.SUB,
    AllOps.MINUS_UNARY,
    AllOps.EQ,
    AllOps.NE,
    AllOps.GT,
    AllOps.GE,
    AllOps.LT,
    AllOps.LE,
}

# Operators whose delay is roughly constant in bit width.
_OPS_T_GROWING_CONST = {
    AllOps.NOT,
    AllOps.XOR,
    AllOps.AND,
    AllOps.OR,
    AllOps.INDEX,
    AllOps.CONCAT,
}
class VirtualHlsPlatform(DummyPlatform):
    """
    Platform with informations about target platform
    and configuration of HLS

    :note: latencies like in average 28nm FPGA
    """

    def __init__(self):
        super(VirtualHlsPlatform, self).__init__()
        # operator: seconds to perform
        self._OP_DELAYS = {
            # exponentially growing with bit width
            AllOps.DIV: 0.9e-9,
            AllOps.POW: 0.6e-9,
            AllOps.MUL: 0.6e-9,
            AllOps.MOD: 0.9e-9,

            # nearly constant with bit width
            AllOps.NOT: 1.2e-9,
            AllOps.XOR: 1.2e-9,
            AllOps.AND: 1.2e-9,
            AllOps.OR: 1.2e-9,

            # nearly linear with bit width
            AllOps.ADD: 1.5e-9,
            AllOps.SUB: 1.5e-9,
            AllOps.MINUS_UNARY: 1.5e-9,
            AllOps.EQ: 1.5e-9,
            AllOps.NE: 1.5e-9,
            AllOps.GT: 1.5e-9,
            AllOps.GE: 1.5e-9,
            AllOps.LT: 1.5e-9,
            AllOps.LE: 1.5e-9,

            # depends on number of inputs and bit width
            AllOps.TERNARY: 0.8e-9,

            # constant
            AllOps.INDEX: 0,
            AllOps.CONCAT: 0,
        }
        self.allocator = HlsAllocator
        self.scheduler = ListSchedueler #HlsScheduler #ForceDirectedScheduler

    # NOTE(review): lru_cache on an instance method keeps platform instances
    # alive via the class-level cache (flake8-bugbear B019); harmless if
    # platforms are few and long-lived.
    @lru_cache()
    def get_op_realization(self, op, bit_width: int,
                           input_cnt: int, clk_period: float):
        # Estimate the pre-register latency of *op* in seconds.
        # NOTE(review): _OPS_T_GROWING_LIN ops scale by log2(width) and
        # _OPS_T_GROWING_EXP ops by width — set names vs. formulas look
        # swapped relative to their comments; confirm the intended model.
        base_delay = self._OP_DELAYS[op]
        if op in _OPS_T_GROWING_CONST:
            latency_pre = base_delay
        elif op in _OPS_T_GROWING_LIN:
            latency_pre = base_delay * log2(bit_width)
        elif op in _OPS_T_GROWING_EXP:
            latency_pre = base_delay * bit_width
        elif op == AllOps.TERNARY:
            latency_pre = base_delay * log2(bit_width)
        else:
            raise NotImplementedError(op)
        return OpRealizationMeta(latency_pre=latency_pre)

    def onHlsInit(self, hls):
        # Hook invoked before HLS starts; no-op for the virtual platform.
        pass
| {
"repo_name": "Nic30/hwtHls",
"path": "hwtHls/platform/virtual.py",
"copies": "1",
"size": "2845",
"license": "mit",
"hash": -3404496479752239000,
"line_mean": 25.3425925926,
"line_max": 78,
"alpha_frac": 0.5901581722,
"autogenerated": false,
"ratio": 3.17877094972067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.426892912192067,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from hwtHls.platform.interpolations import interpolate_area_2d, downscale_width
class AbstractXilinxPlatform():
    """
    Base class for Xilinx FPGA timing models.

    Subclasses supply multiplexer LUT/MUXF coefficients and raw delay
    constants; this class turns them into operator delay estimates.
    """

    def __init__(self):
        # {inputCnt: (lut6_coef, mux7f_coef, mux8f_coef)}
        self.mux_coefs = self._initMuxCoefs()
        # Ascending piece sizes, used to pick the smallest sufficient piece.
        self.mux_coefs_inputs = list(self.mux_coefs.keys())
        self.mux_coefs_inputs.sort()
        self._initDelayCoefs()

    def _initMuxCoefs(self):
        """
        get mux coefs dict

        :return: {inputCnt: (lut6_coef, mux7f_coef, mux8f_coef)}
        """
        raise NotImplementedError(
            "Override this in your implementation of platform")

    def _initDelayCoefs(self):
        """
        set delay coefficients
        """
        raise NotImplementedError(
            "Override this in your implementation of platform")
        # example values to allow IDE to gues attribute types
        # (intentionally unreachable — documentation only)
        self.ARC_DELAY = 1
        self.LUT6_DELAY = 1
        self.MUXF7_DELAY = 1
        self.MUXF8_DELAY = 1
        self.NET_DELAY = 1
        self.CMP_DELAY = {1.0: (1, 1)}
        self.ADD_COMB_DELAYS = {1.0: (1, 1)}
        self.BITWISE_DELAY = 1
        # pre
        self.FF_DELAY_SETUP = 0
        # post
        self.FF_DELAY_HOLD = 0

    @lru_cache()
    def get_op_delay(self, op, bit_width: int, clk_period: float):
        """Interpolated delay of *op* at the given width and clock period."""
        w = downscale_width(bit_width)
        data = self.OP_DELAYS[op]
        return interpolate_area_2d(data, w, clk_period)

    def get_bitwise_op_delay(self, input_cnt: int, clk_period: float):
        """
        delay for bitwise AND, OR, XOR etc
        """
        return self.BITWISE_DELAY

    @lru_cache()
    def get_cmp_delay(self, bit_width: int, clk_period: float):
        """Interpolated delay of a comparator."""
        return interpolate_area_2d(self.CMP_DELAY, bit_width, clk_period)

    @lru_cache()
    def get_mux_delay(self, input_cnt: int, clk_period: float):
        """
        Formula-based delay lookup for multiplexer

        :return: delay of a mux based on the following formula:
            delay = ARC + K1*LUT6 + K2*MUX7F + K3*MUX8F + (K1-1)*NET
        """
        # Iterate candidate piece sizes in ascending order so the smallest
        # sufficient piece is selected (previously the raw dict was iterated,
        # leaving the pre-sorted mux_coefs_inputs list unused).
        input_number_list = self.mux_coefs_inputs
        # get the delay parameters
        ARC = self.ARC_DELAY
        LUT6 = self.LUT6_DELAY
        MUXF7 = self.MUXF7_DELAY
        MUXF8 = self.MUXF8_DELAY
        NET = self.NET_DELAY
        delay = 0
        while input_cnt > 1:
            # if input_cnt > input_number_max, we divide the mux into several
            # pieces the last piece's input number <= input_number_max,
            # others = input_number_max.
            for piece_input_number in input_number_list:
                if input_cnt <= piece_input_number:
                    break
            # Ceiling division for the number of inputs on the next level.
            # Bugfix: true division (Python 2 legacy) produced a float that
            # converges towards 1 without ever reaching it → infinite loop.
            input_cnt = (input_cnt + piece_input_number -
                         1) // piece_input_number
            K1, K2, K3 = self.mux_coefs[piece_input_number]
            # add delay of current piece
            piece_delay = (ARC +
                           K1 * LUT6 +
                           K2 * MUXF7 +
                           K3 * MUXF8 +
                           (K1 - 1) * NET)
            delay = delay + piece_delay
            # add net delay if it's not the last piece
            if input_cnt > 1:
                delay = delay + NET
        return delay

    @lru_cache()
    def get_add_op_delay(self, bitWidth: int, clk_period: float):
        """Interpolated delay of a combinational adder."""
        # Bugfix: interpolate over the adder delay table, not over `self`
        # (compare get_op_delay/get_cmp_delay above).
        return interpolate_area_2d(self.ADD_COMB_DELAYS, bitWidth, clk_period)

    def get_ff_delay(self, bitWidth: int, clk_period: float):
        """Flip-flop setup delay (width-independent)."""
        return self.FF_DELAY_SETUP
| {
"repo_name": "Nic30/hwtHls",
"path": "hwtHls/platform/xilinx/abstract.py",
"copies": "1",
"size": "3632",
"license": "mit",
"hash": 9179609975619353000,
"line_mean": 31.7207207207,
"line_max": 79,
"alpha_frac": 0.5547907489,
"autogenerated": false,
"ratio": 3.588932806324111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9643723555224111,
"avg_score": 0,
"num_lines": 111
} |
from functools import lru_cache
from . import app, configure, request
from . import get_or_create, commit
from . import BaseIngestResource, QueryResource
from ..models.cinder import (
Snapshot, AvailabilityZone, Volume, VolumeSnapshot, VolumeStatus, VolumeState,
VolumeAttachment)
# Thin REST resources: each exposes query access to one cinder model.
class SnapshotResource(QueryResource):
    query_class = Snapshot


class AvailabilityZoneResource(QueryResource):
    query_class = AvailabilityZone


class VolumeResource(QueryResource):
    query_class = Volume


class VolumeSnapshotResource(QueryResource):
    query_class = VolumeSnapshot


class VolumeStatusResource(QueryResource):
    query_class = VolumeStatus


class VolumeStateResource(QueryResource):
    query_class = VolumeState


class VolumeAttachmentResource(QueryResource):
    query_class = VolumeAttachment
class IngestResource(BaseIngestResource):
    def ingest(self):
        """Data ingest"""
        # Memoise get_or_create so repeated rows hit the DB only once.
        @lru_cache(maxsize=100000)
        def cache(model, **kwargs):
            return get_or_create(model, **kwargs)

        for payload in request.get_json(force=True):
            data = payload["data"]
            snapshot = cache(Snapshot, ts=data["timestamp"])

            for vol in data.get("volumes", []):
                az = None
                if "availability_zone" in vol:
                    az = cache(AvailabilityZone,
                               name=vol["availability_zone"])
                volume = cache(
                    Volume,
                    openstack_id=vol["id"],
                    availability_zone=az,
                    owner=vol["user_id"],
                    tenant=vol["os-vol-tenant-attr:tenant_id"])
                status = cache(VolumeStatus, name=vol["status"])
                cache(VolumeState,
                      name=vol["name"],
                      size=vol["size"],
                      status=status,
                      snapshot=snapshot,
                      volume=volume)
                for instance in vol["attachments"]:
                    cache(VolumeAttachment,
                          instance=instance["server_id"],
                          volume=volume,
                          snapshot=snapshot)

            for snap in data.get("volume_snapshots", []):
                cache(VolumeSnapshot,
                      openstack_id=snap["id"],
                      name=snap["name"],
                      description=snap["description"],
                      size=snap["size"],
                      source=snap["volume_id"])

        commit()
        return "", 204
def setup():
    """Let's roll."""
    # URL route -> resource class.
    configure({
        "/snapshot": SnapshotResource,
        "/az": AvailabilityZoneResource,
        "/volume": VolumeResource,
        "/volume/snapshot": VolumeSnapshotResource,
        "/volume/status": VolumeStatusResource,
        "/volume/state": VolumeStateResource,
        "/volume/attachment": VolumeAttachmentResource,
        "/ingest": IngestResource,
    })


setup()
| {
"repo_name": "eResearchSA/reporting-unified",
"path": "unified/apis/cinder.py",
"copies": "1",
"size": "3285",
"license": "apache-2.0",
"hash": -2861473051433048600,
"line_mean": 28.0707964602,
"line_max": 82,
"alpha_frac": 0.5482496195,
"autogenerated": false,
"ratio": 5.239234449760765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6287484069260766,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from . import app, configure, request
from . import get_or_create, commit
from . import QueryResource, BaseIngestResource
from nectar import get_domain
from ..models.keystone import (
Snapshot, Account, Tenant, Domain, Membership,
AccountReference, AccountReferenceMapping)
# Thin REST resources: each exposes query access to one keystone model.
class AccountResource(QueryResource):
    """Account"""
    query_class = Account


class TenantResource(QueryResource):
    """Tenant"""
    query_class = Tenant


class DomainResource(QueryResource):
    """Domain"""
    query_class = Domain


class MembershipResource(QueryResource):
    """Membership"""
    query_class = Membership


class AccountReferenceResource(QueryResource):
    """Account Reference"""
    query_class = AccountReference


class AccountReferenceMappingResource(QueryResource):
    """Account Reference Mapping"""
    query_class = AccountReferenceMapping


class SnapshotResource(QueryResource):
    """Snapshot"""
    query_class = Snapshot
class IngestResource(BaseIngestResource):
    def ingest(self):
        """Ingest data."""
        # Memoise get_or_create so repeated rows hit the DB only once.
        @lru_cache(maxsize=10000)
        def cache(model, **kwargs):
            return get_or_create(model, **kwargs)

        for payload in request.get_json(force=True):
            data = payload["data"]
            snapshot = cache(Snapshot, ts=data["timestamp"])

            for user in data["users"]:
                account = cache(Account, openstack_id=user["id"])
                if not user["email"]:
                    continue
                # Fix broken emails containing ";"
                email = user["email"].split(";")[0]
                domain_name = get_domain(email)
                domain = cache(Domain,
                               name=domain_name) if domain_name else None
                reference = cache(AccountReference, value=email, domain=domain)
                cache(AccountReferenceMapping,
                      account=account,
                      reference=reference,
                      snapshot=snapshot)

            for details in data["tenants"]:
                tenant = cache(Tenant, openstack_id=details["id"])
                tenant.name = details["name"]
                tenant.description = details["description"]
                if "allocation_id" in details:
                    try:
                        tenant.allocation = int(details["allocation_id"])
                    except ValueError:
                        pass
                for member in details.get("users", []):
                    account = cache(Account, openstack_id=member["id"])
                    cache(Membership,
                          account=account,
                          tenant=tenant,
                          snapshot=snapshot)

        commit()
        return "", 204
def setup():
    """Let's roll."""
    # URL route -> resource class.
    configure({
        "/account": AccountResource,
        "/tenant": TenantResource,
        "/domain": DomainResource,
        "/membership": MembershipResource,
        "/reference": AccountReferenceResource,
        "/mapping": AccountReferenceMappingResource,
        "/snapshot": SnapshotResource,
        "/ingest": IngestResource,
    })


setup()
| {
"repo_name": "eResearchSA/reporting-unified",
"path": "unified/apis/keystone.py",
"copies": "1",
"size": "3389",
"license": "apache-2.0",
"hash": -2952160291906767000,
"line_mean": 26.7786885246,
"line_max": 79,
"alpha_frac": 0.5768663323,
"autogenerated": false,
"ratio": 4.961932650073207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 122
} |
from functools import lru_cache
from notifications_utils.columns import Columns
from notifications_utils.sanitise_text import SanitiseASCII
from .data import (
ADDITIONAL_SYNONYMS,
COUNTRIES_AND_TERRITORIES,
EUROPEAN_ISLANDS,
ROYAL_MAIL_EUROPEAN,
UK,
UK_ISLANDS,
WELSH_NAMES,
Postage,
)
class CountryMapping(Columns):
    """Case-, punctuation- and accent-insensitive lookup of country names."""

    @staticmethod
    @lru_cache(maxsize=2048, typed=False)
    def make_key(original_key):
        """Normalise a country name into a canonical lookup key."""
        original_key = original_key.replace('&', 'and')
        original_key = original_key.replace('+', 'and')
        normalised = "".join(
            character.lower() for character in original_key
            if character not in " _-'’,.()"
        )
        # Compute the ASCII transliteration once (was computed twice).
        encoded = SanitiseASCII.encode(normalised)
        # If transliteration lost information ('?'), keep the unicode form.
        if '?' in encoded:
            return normalised
        return encoded

    def __contains__(self, key):
        if any(c.isdigit() for c in key):
            # A string with a digit can’t be a country and is probably a
            # postcode, so let’s do a little optimisation, skip the
            # expensive string manipulation to normalise the key and say
            # that there’s no matching country
            return False
        return super().__contains__(key)

    def __getitem__(self, key):
        # Also try common definite-article prefixes (English and Welsh).
        for key_ in (key, f'the {key}', f'yr {key}', f'y {key}'):
            if key_ in self:
                return super().__getitem__(key_)
        raise CountryNotFoundError(
            f'Not a known country or territory ({key})'
        )
# Combined lookup table: canonical countries and territories plus island,
# Welsh and synonym aliases.
countries = CountryMapping(dict(
    COUNTRIES_AND_TERRITORIES +
    UK_ISLANDS +
    EUROPEAN_ISLANDS +
    WELSH_NAMES +
    ADDITIONAL_SYNONYMS
))
class Country():
    """A country or territory, normalised to its canonical name."""

    def __init__(self, given_name):
        # Raises CountryNotFoundError for unknown names.
        self.canonical_name = countries[given_name]

    def __eq__(self, other):
        # Compare by canonical name; defer to the other type instead of
        # raising AttributeError on e.g. `Country(...) == "UK"`.
        if not isinstance(other, Country):
            return NotImplemented
        return self.canonical_name == other.canonical_name

    def __repr__(self):
        return f'{type(self).__name__}({self.canonical_name!r})'

    @property
    def postage_zone(self):
        """Postage zone (UK, Europe or rest-of-world) for this country."""
        if self.canonical_name == UK:
            return Postage.UK
        if self.canonical_name in ROYAL_MAIL_EUROPEAN:
            return Postage.EUROPE
        return Postage.REST_OF_WORLD
class CountryNotFoundError(KeyError):
    """Raised when a name cannot be matched to a country or territory."""
    pass
| {
"repo_name": "alphagov/notifications-utils",
"path": "notifications_utils/countries/__init__.py",
"copies": "1",
"size": "2164",
"license": "mit",
"hash": 8113664969215347000,
"line_mean": 24.6666666667,
"line_max": 72,
"alpha_frac": 0.6108534323,
"autogenerated": false,
"ratio": 3.69811320754717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.480896663984717,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from numpy import (
asarray,
atleast_2d,
concatenate,
empty,
eye,
kron,
log,
sqrt,
tensordot,
zeros_like,
)
from numpy.linalg import eigh
from optimix import Function
from ._free import FreeFormCov
from ._lrfree import LRFreeFormCov
from .._util import format_function, unvec
class Kron2SumCov(Function):
"""
Implements K = C₀ ⊗ GGᵀ + C₁ ⊗ I.
C₀ and C₁ are d×d symmetric matrices. C₀ is a semi-definite positive matrix while C₁
is a positive definite one. G is a n×m matrix and I is a n×n identity matrix. Let
M = Uₘ Sₘ Uₘᵀ be the eigen decomposition for any matrix M. The documentation and
implementation of this class make use of the following definitions:
- X = GGᵀ = Uₓ Sₓ Uₓᵀ
- C₁ = U₁ S₁ U₁ᵀ
- Cₕ = S₁⁻½ U₁ᵀ C₀ U₁ S₁⁻½
- Cₕ = Uₕ Sₕ Uₕᵀ
- D = (Sₕ ⊗ Sₓ + Iₕₓ)⁻¹
- Lₓ = Uₓᵀ
- Lₕ = Uₕᵀ S₁⁻½ U₁ᵀ
- L = Lₕ ⊗ Lₓ
The above definitions allows us to write the inverse of the covariance matrix as::
K⁻¹ = LᵀDL,
where D is a diagonal matrix.
Example
-------
.. doctest::
>>> from numpy import array
>>> from glimix_core.cov import Kron2SumCov
>>>
>>> G = array([[-1.5, 1.0], [-1.5, 1.0], [-1.5, 1.0]])
>>> Lr = array([[3], [2]], float)
>>> Ln = array([[1, 0], [2, 1]], float)
>>>
>>> cov = Kron2SumCov(G, 2, 1)
>>> cov.C0.L = Lr
>>> cov.C1.L = Ln
>>> print(cov)
Kron2SumCov(G=..., dim=2, rank=1): Kron2SumCov
LRFreeFormCov(n=2, m=1): C₀
L: [[3.]
[2.]]
FreeFormCov(dim=2): C₁
L: [[1. 0.]
[2. 1.]]
"""
def __init__(self, G, dim, rank):
"""
Constructor.
Parameters
----------
dim : int
Dimension d for the square matrices C₀ and C₁.
rank : int
Maximum rank of the C₁ matrix.
"""
self._cache = {"LhD": None}
self._C0 = LRFreeFormCov(dim, rank)
self._C0.name = "C₀"
self._C1 = FreeFormCov(dim)
self._C1.name = "C₁"
G = atleast_2d(asarray(G, float))
self._G = G
self._Sxe = None
self._Sx = None
self._Lx = None
self._LxG = None
self._diag_LxGGLx = None
self._Lxe = None
self._LxGe = None
self._diag_LxGGLxe = None
Function.__init__(
self, "Kron2SumCov", composite=[("C0", self._C0), ("C1", self._C1)]
)
self._C0.listen(self._parameters_update)
self._C1.listen(self._parameters_update)
def _init_svd(self):
from scipy.linalg import svd
from numpy_sugar.linalg import dotd
if self._Lx is not None:
return
G = self._G
U, S, _ = svd(G, check_finite=False)
S *= S
self._Sxe = S
self._Sx = concatenate((S, [0.0] * (U.shape[0] - S.shape[0])))
self._Lx = U.T
self._LxG = self._Lx @ G
self._diag_LxGGLx = dotd(self._LxG, self._LxG.T)
self._Lxe = U[:, : S.shape[0]].T
self._LxGe = self._Lxe @ G
self._diag_LxGGLxe = dotd(self._LxGe, self._LxGe.T)
@property
@lru_cache(maxsize=None)
def Ge(self):
"""
Result of US from the SVD decomposition G = USVᵀ.
"""
from scipy.linalg import svd
from numpy_sugar.linalg import ddot
U, S, _ = svd(self._G, full_matrices=False, check_finite=False)
if U.shape[1] < self._G.shape[1]:
return ddot(U, S)
return self._G
@property
@lru_cache(maxsize=None)
def _GG(self):
return self._G @ self._G.T
@property
@lru_cache(maxsize=None)
def _I(self):
return eye(self._G.shape[0])
def _parameters_update(self):
self._cache["LhD"] = None
def listen(self, func):
"""
Listen to parameters change.
Parameters
----------
func : callable
Function to be called when a parameter changes.
"""
self._C0.listen(func)
self._C1.listen(func)
@property
def Lx(self):
"""
Lₓ.
"""
self._init_svd()
return self._Lx
@property
@lru_cache(maxsize=None)
def _X(self):
return self.G @ self.G.T
@property
def _LhD(self):
"""
Implements Lₕ and D.
Returns
-------
Lh : ndarray
Uₕᵀ S₁⁻½ U₁ᵀ.
D : ndarray
(Sₕ ⊗ Sₓ + Iₕₓ)⁻¹.
"""
from numpy_sugar.linalg import ddot
self._init_svd()
if self._cache["LhD"] is not None:
return self._cache["LhD"]
S1, U1 = self.C1.eigh()
U1S1 = ddot(U1, 1 / sqrt(S1))
Sh, Uh = eigh(U1S1.T @ self.C0.value() @ U1S1)
self._cache["LhD"] = {
"Lh": (U1S1 @ Uh).T,
"D": 1 / (kron(Sh, self._Sx) + 1),
"De": 1 / (kron(Sh, self._Sxe) + 1),
}
return self._cache["LhD"]
@property
def Lh(self):
"""
Lₕ.
"""
return self._LhD["Lh"]
@property
def D(self):
"""
(Sₕ ⊗ Sₓ + Iₕₓ)⁻¹.
"""
return self._LhD["D"]
@property
def _De(self):
return self._LhD["De"]
@property
def G(self):
"""
User-provided matrix G, n×m.
"""
return self._G
@property
def C0(self):
"""
Semi-definite positive matrix C₀.
"""
return self._C0
@property
def C1(self):
"""
Definite positive matrix C₁.
"""
return self._C1
def value(self):
"""
Covariance matrix K = C₀ ⊗ GGᵀ + C₁ ⊗ I.
Returns
-------
K : ndarray
C₀ ⊗ GGᵀ + C₁ ⊗ I.
"""
C0 = self._C0.value()
C1 = self._C1.value()
return kron(C0, self._GG) + kron(C1, self._I)
def gradient(self):
"""
Gradient of K.
Returns
-------
C0 : ndarray
Derivative of C₀ over its parameters.
C1 : ndarray
Derivative of C₁ over its parameters.
"""
self._init_svd()
C0 = self._C0.gradient()["Lu"].T
C1 = self._C1.gradient()["Lu"].T
grad = {"C0.Lu": kron(C0, self._X).T, "C1.Lu": kron(C1, self._I).T}
return grad
def gradient_dot(self, v):
"""
Implements ∂K⋅v.
Parameters
----------
v : array_like
Vector from ∂K⋅v.
Returns
-------
C0.Lu : ndarray
∂K⋅v, where the gradient is taken over the C₀ parameters.
C1.Lu : ndarray
∂K⋅v, where the gradient is taken over the C₁ parameters.
"""
self._init_svd()
V = unvec(v, (self.G.shape[0], -1) + v.shape[1:])
r = {}
C = self._C0.gradient()["Lu"]
r["C0.Lu"] = tensordot(V.T @ self.G @ self.G.T, C, axes=([-2], [0]))
r["C0.Lu"] = r["C0.Lu"].reshape(V.shape[2:] + (-1,) + (C.shape[-1],), order="F")
C = self._C1.gradient()["Lu"]
r["C1.Lu"] = tensordot(V.T, C, axes=([-2], [0]))
r["C1.Lu"] = r["C1.Lu"].reshape(V.shape[2:] + (-1,) + (C.shape[-1],), order="F")
return r
def solve(self, v):
"""
Implements the product K⁻¹⋅v.
Parameters
----------
v : array_like
Array to be multiplied.
Returns
-------
x : ndarray
Solution x to the equation K⋅x = y.
"""
from numpy_sugar.linalg import ddot
self._init_svd()
L = kron(self.Lh, self.Lx)
return L.T @ ddot(self.D, L @ v, left=True)
def logdet(self):
"""
Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K.
"""
self._init_svd()
return -log(self._De).sum() + self.G.shape[0] * self.C1.logdet()
def logdet_gradient(self):
"""
Implements ∂log|K| = Tr[K⁻¹∂K].
It can be shown that::
∂log|K| = diag(D)ᵀdiag(L(∂K)Lᵀ) = diag(D)ᵀ(diag(Lₕ∂C₀Lₕᵀ)⊗diag(LₓGGᵀLₓᵀ)),
when the derivative is over the parameters of C₀. Similarly,
∂log|K| = diag(D)ᵀdiag(L(∂K)Lᵀ) = diag(D)ᵀ(diag(Lₕ∂C₁Lₕᵀ)⊗diag(I)),
over the parameters of C₁.
Returns
-------
C0 : ndarray
Derivative of C₀ over its parameters.
C1 : ndarray
Derivative of C₁ over its parameters.
"""
from numpy_sugar.linalg import dotd
self._init_svd()
dC0 = self._C0.gradient()["Lu"]
grad_C0 = zeros_like(self._C0.Lu)
for i in range(self._C0.Lu.shape[0]):
t = kron(dotd(self.Lh, dC0[..., i] @ self.Lh.T), self._diag_LxGGLxe)
grad_C0[i] = (self._De * t).sum()
dC1 = self._C1.gradient()["Lu"]
grad_C1 = zeros_like(self._C1.Lu)
p = self._Sxe.shape[0]
np = self._G.shape[0] - p
for i in range(self._C1.Lu.shape[0]):
t = (dotd(self.Lh, dC1[..., i] @ self.Lh.T) * np).sum()
t1 = kron(dotd(self.Lh, dC1[..., i] @ self.Lh.T), eye(p))
t += (self._De * t1).sum()
grad_C1[i] = t
return {"C0.Lu": grad_C0, "C1.Lu": grad_C1}
def LdKL_dot(self, v, v1=None):
"""
Implements L(∂K)Lᵀv.
The array v can have one or two dimensions and the first dimension has to have
size n⋅p.
Let vec(V) = v. We have
L(∂K)Lᵀ⋅v = ((Lₕ∂C₀Lₕᵀ) ⊗ (LₓGGᵀLₓᵀ))vec(V) = vec(LₓGGᵀLₓᵀVLₕ∂C₀Lₕᵀ),
when the derivative is over the parameters of C₀. Similarly,
L(∂K)Lᵀv = ((Lₕ∂C₁Lₕᵀ) ⊗ (LₓLₓᵀ))vec(V) = vec(LₓLₓᵀVLₕ∂C₁Lₕᵀ),
over the parameters of C₁.
"""
self._init_svd()
# Helper: batched matmul over the first axis; the transpose keeps the
# batch axis in front when `a` carries more dimensions than `b`.
def dot(a, b):
r = tensordot(a, b, axes=([1], [0]))
if a.ndim > b.ndim:
return r.transpose([0, 2, 1])
return r
Lh = self.Lh
# Reshape v back into its matrix form V (column-major, as in vec()).
V = unvec(v, (self.Lx.shape[0], -1) + v.shape[1:])
LdKL_dot = {
"C0.Lu": empty((v.shape[0],) + v.shape[1:] + (self._C0.Lu.shape[0],)),
"C1.Lu": empty((v.shape[0],) + v.shape[1:] + (self._C1.Lu.shape[0],)),
}
dC0 = self._C0.gradient()["Lu"]
for i in range(self._C0.Lu.shape[0]):
# vec(LₓG·GᵀLₓᵀ·V·Lₕ∂C₀Lₕᵀ), one slice per C₀ parameter.
t = dot(self._LxG, dot(self._LxG.T, dot(V, Lh @ dC0[..., i] @ Lh.T)))
LdKL_dot["C0.Lu"][..., i] = t.reshape((-1,) + t.shape[2:], order="F")
dC1 = self._C1.gradient()["Lu"]
for i in range(self._C1.Lu.shape[0]):
t = dot(V, Lh @ dC1[..., i] @ Lh.T)
LdKL_dot["C1.Lu"][..., i] = t.reshape((-1,) + t.shape[2:], order="F")
return LdKL_dot
def __str__(self):
"""Human-readable summary: this covariance's header plus the nested C₀/C₁ reprs, indented."""
dim = self._C0.L.shape[0]
rank = self._C0.L.shape[1]
msg0 = format_function(self, {"G": "...", "dim": dim, "rank": rank})
msg1 = str(self._C0) + "\n" + str(self._C1)
# Indent the nested covariance descriptions by two spaces.
msg1 = "  " + "\n  ".join(msg1.split("\n"))
return (msg0 + msg1).rstrip()
| {
"repo_name": "glimix/limix-inference",
"path": "glimix_core/cov/_kron2sum.py",
"copies": "1",
"size": "11640",
"license": "mit",
"hash": -3970539704452696000,
"line_mean": 25.0092592593,
"line_max": 88,
"alpha_frac": 0.4744571022,
"autogenerated": false,
"ratio": 2.8125156445556945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8784926383456994,
"avg_score": 0.00040927265974008913,
"num_lines": 432
} |
from functools import lru_cache
from scipy.special import binom
from vectortween.Animation import Animation
from vectortween.ParallelAnimation import ParallelAnimation
from vectortween.ParametricAnimation import ParametricAnimation
class BezierCurveAnimation(Animation):
"""
animation of a 2d position along a Bezier curve (convenience class converting the
Bernstein-polynomial form of the curve into two parametric equations)
"""
def __init__(self, controlpoints=None, tween=None, ytween=None, noise_fn=None, y_noise_fn=None):
"""
:param controlpoints: list of (x, y) control points defining the Bezier curve
:param tween: tween method for the x coordinate (defaults to linear if not specified)
:param ytween: tween method for the y coordinate (defaults to same as that for x coordinate)
:param noise_fn: optional noise function mapping (x, t) -> value
:param y_noise_fn: optional noise function mapping (y, t) -> value
"""
super().__init__(None, None)
if controlpoints is None:
controlpoints = []
if tween is None:
tween = ['linear']
if ytween is None:
ytween = tween
self.tween = tween
self.ytween = ytween
self.noise_fn = noise_fn
self.y_noise_fn = y_noise_fn
self.controlpoints = controlpoints
# Bezier curve of degree `order`: sum_i C(order, i)·(1-t)^(order-i)·t^i·P_i,
# built here as two equation strings (x and y) for ParametricAnimation.
order = len(controlpoints) - 1
x_terms = []
y_terms = []
for i, c in enumerate(controlpoints):
x_terms.append("{0}*((1-t)**({1}-{2}))*(t**{2})*{3}".format(binom(order, i), order, i, c[0]))
y_terms.append("{0}*((1-t)**({1}-{2}))*(t**{2})*{3}".format(binom(order, i), order, i, c[1]))
# print ("+".join(x_terms))
# print ("+".join(y_terms))
self.anim = ParallelAnimation(
[ParametricAnimation(equation="{}".format("+".join(x_terms)), tween=tween, noise_fn=self.noise_fn),
ParametricAnimation(equation="{}".format("+".join(y_terms)), tween=ytween, noise_fn=self.y_noise_fn)])
# Cache the curve's endpoints (t=0 and t=1).
self.frm = self.anim.make_frame(0, 0, 0, 1, 1)
self.to = self.anim.make_frame(1, 0, 0, 1, 1)
#@lru_cache(maxsize=1000)
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
:param frame: current frame
:param birthframe: frame where this animation starts returning something other than None
:param startframe: frame where animation starts to evolve
:param stopframe: frame where animation is completed
:param deathframe: frame where animation starts to return None
:return: an (x, y) position on the curve, or None outside [birthframe, deathframe]
"""
return self.anim.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
| {
"repo_name": "shimpe/pyvectortween",
"path": "vectortween/BezierCurveAnimation.py",
"copies": "1",
"size": "2705",
"license": "mit",
"hash": -6104968054944312000,
"line_mean": 43.3442622951,
"line_max": 115,
"alpha_frac": 0.6277264325,
"autogenerated": false,
"ratio": 3.665311653116531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4793038085616531,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from sqlparse import parse, tokens
from sqlparse.sql import Comment, IdentifierList, Parenthesis, Token
# Results are memoized per (query, hide_columns) pair; 500 entries bounds memory.
@lru_cache(maxsize=500)
def sql_fingerprint(query, hide_columns=True):
"""
Simplify a query, taking away exact values and fields selected.
Imperfect but better than super explicit, value-dependent queries.
"""
parsed_queries = parse(query)
# Unparseable/empty input yields an empty fingerprint rather than raising.
if not parsed_queries:
return ""
# Only the first statement of a multi-statement string is fingerprinted.
parsed_query = sql_recursively_strip(parsed_queries[0])
sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
return str(parsed_query).strip()
# Token ttypes whose literal values carry no structural meaning; during
# simplification they are replaced wholesale with the "#" placeholder.
sql_deleteable_tokens = (
tokens.Number,
tokens.Number.Float,
tokens.Number.Integer,
tokens.Number.Hexadecimal,
tokens.String,
tokens.String.Single,
)
def sql_trim(node, idx=0):
    """Strip whitespace tokens from one end of *node*, in place.

    idx=0 trims from the front; a negative idx (e.g. -2, just inside a
    closing parenthesis) trims from the back. At least abs(idx) tokens
    are always kept so the index stays valid.
    """
    toks = node.tokens
    remaining = len(toks)
    floor = abs(idx)
    while remaining > floor and toks[idx].is_whitespace:
        toks.pop(idx)
        remaining -= 1
def sql_strip(node):
    """Collapse each run of whitespace tokens to a single space, in place.

    The first whitespace token of a run becomes " "; any further
    whitespace tokens in the same run become the empty string.
    """
    run_length = 0
    for tok in node.tokens:
        if not tok.is_whitespace:
            run_length = 0
            continue
        tok.value = " " if run_length == 0 else ""
        run_length += 1
def sql_recursively_strip(node):
"""Normalize whitespace of *node* and all nested token groups, in place; returns node."""
for sub_node in node.get_sublists():
sql_recursively_strip(sub_node)
# Comments keep their whitespace verbatim.
if isinstance(node, Comment):
return node
sql_strip(node)
# strip duplicate whitespaces between parenthesis
if isinstance(node, Parenthesis):
sql_trim(node, 1)
sql_trim(node, -2)
return node
def sql_recursively_simplify(node, hide_columns=True):
"""
Mutate the parsed statement *node* in place, replacing literal values
(and, when hide_columns is True, selected column lists) with
placeholders so equivalent queries share one fingerprint.
"""
# Erase which fields are being updated in an UPDATE
# NOTE(review): assumes an UPDATE always has SET and WHERE — the [0]
# lookups raise IndexError otherwise; confirm callers guarantee this.
if node.tokens[0].value == "UPDATE":
i_set = [i for (i, t) in enumerate(node.tokens) if t.value == "SET"][0]
i_where = [
i
for (i, t) in enumerate(node.tokens)
if t.is_group and t.tokens[0].value == "WHERE"
][0]
middle = [Token(tokens.Punctuation, " ... ")]
node.tokens = node.tokens[: i_set + 1] + middle + node.tokens[i_where:]
# Ensure IN clauses with simple value in always simplify to "..."
if node.tokens[0].value == "WHERE":
in_token_indices = (i for i, t in enumerate(node.tokens) if t.value == "IN")
for in_token_index in in_token_indices:
parenthesis = next(
t
for t in node.tokens[in_token_index + 1 :]
if isinstance(t, Parenthesis)
)
# Only collapse when every token inside is a deletable literal,
# so subqueries inside IN (...) are preserved.
if all(
getattr(t, "ttype", "") in sql_deleteable_tokens
for t in parenthesis.tokens[1:-1]
):
parenthesis.tokens[1:-1] = [Token(tokens.Punctuation, "...")]
# Erase the names of savepoints since they are non-deterministic
if hasattr(node, "tokens"):
# SAVEPOINT x
if str(node.tokens[0]) == "SAVEPOINT":
node.tokens[2].tokens[0].value = "`#`"
return
# RELEASE SAVEPOINT x
elif len(node.tokens) >= 3 and node.tokens[2].value == "SAVEPOINT":
node.tokens[4].tokens[0].value = "`#`"
return
# ROLLBACK TO SAVEPOINT X
token_values = [getattr(t, "value", "") for t in node.tokens]
if len(node.tokens) == 7 and token_values[:6] == [
"ROLLBACK",
" ",
"TO",
" ",
"SAVEPOINT",
" ",
]:
node.tokens[6].tokens[0].value = "`#`"
return
# Erase volatile part of PG cursor name
if node.tokens[0].value.startswith('"_django_curs_'):
node.tokens[0].value = '"_django_curs_#"'
prev_word_token = None
for token in node.tokens:
ttype = getattr(token, "ttype", None)
# Detect IdentifierList tokens within an ORDER BY, GROUP BY or HAVING
# clauses
inside_order_group_having = match_keyword(
prev_word_token, ["ORDER BY", "GROUP BY", "HAVING"]
)
# Column lists in those clauses are kept; elsewhere they become "...".
replace_columns = not inside_order_group_having and hide_columns
if isinstance(token, IdentifierList) and replace_columns:
token.tokens = [Token(tokens.Punctuation, "...")]
elif hasattr(token, "tokens"):
sql_recursively_simplify(token, hide_columns=hide_columns)
elif ttype in sql_deleteable_tokens:
token.value = "#"
elif getattr(token, "value", None) == "NULL":
token.value = "#"
# Track the last non-whitespace token to spot clause keywords.
if not token.is_whitespace:
prev_word_token = token
def match_keyword(token, keywords):
    """
    Return True when *token* is a keyword token whose (case-insensitive)
    value is one of *keywords*; False for a missing or non-keyword token.
    """
    return bool(token and token.is_keyword and token.value.upper() in keywords)
| {
"repo_name": "YPlan/django-perf-rec",
"path": "src/django_perf_rec/sql.py",
"copies": "1",
"size": "4796",
"license": "mit",
"hash": -4027937071381561000,
"line_mean": 28.975,
"line_max": 84,
"alpha_frac": 0.5806922435,
"autogenerated": false,
"ratio": 3.7645211930926217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48452134365926214,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from starlette import status
from fastapi import Depends, HTTPException
from opentrons.api import MainRouter
from opentrons.hardware_control import ThreadManager, ThreadedAsyncLock
from robot_server.hardware_wrapper import HardwareWrapper
from robot_server.service.session.manager import SessionManager
from robot_server.service.protocol.manager import ProtocolManager
from robot_server.service.legacy.rpc import RPCServer
# The single instance of the RPCServer
_rpc_server_instance = None
# The single instance of the SessionManager
_session_manager_inst = None
# Process-wide hardware access point; lazily initialized elsewhere,
# so get_hardware() may return a falsy value until setup completes.
api_wrapper = HardwareWrapper()
async def verify_hardware():
"""
A dependency that raises an http exception if hardware is not ready. Must
only be used in PATH operation.

:raises HTTPException: 503 while the hardware has not finished initializing.
"""
if not api_wrapper.get_hardware():
raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Robot is not ready for request")
async def get_hardware() -> ThreadManager:
"""Hardware dependency. May return None/falsy before initialization; pair with verify_hardware."""
return api_wrapper.get_hardware()
# lru_cache(maxsize=1) makes this a lazy module-level singleton.
@lru_cache(maxsize=1)
def get_motion_lock() -> ThreadedAsyncLock:
"""
Get the single motion lock.
:return: a threaded async lock
"""
return ThreadedAsyncLock()
async def get_rpc_server() -> RPCServer:
"""The RPC Server instance (lazily created singleton)."""
# NOTE(review): no lock guards this check-then-set; safe on a single
# event loop, but two concurrent first calls could race — confirm.
global _rpc_server_instance
if not _rpc_server_instance:
h = await get_hardware()
root = MainRouter(h, lock=get_motion_lock())
_rpc_server_instance = RPCServer(None, root)
return _rpc_server_instance
# lru_cache(maxsize=1) makes this a lazy module-level singleton.
@lru_cache(maxsize=1)
def get_protocol_manager() -> ProtocolManager:
"""The single protocol manager instance"""
return ProtocolManager()
def get_session_manager(
hardware: ThreadManager = Depends(get_hardware),
motion_lock: ThreadedAsyncLock = Depends(get_motion_lock),
protocol_manager: ProtocolManager = Depends(get_protocol_manager)) \
-> SessionManager:
"""The single session manager instance (created on first request; later
dependency values are ignored once the singleton exists)."""
global _session_manager_inst
if not _session_manager_inst:
_session_manager_inst = SessionManager(
hardware=hardware,
motion_lock=motion_lock,
protocol_manager=protocol_manager)
return _session_manager_inst
| {
"repo_name": "OpenTrons/opentrons-api",
"path": "robot-server/robot_server/service/dependencies.py",
"copies": "2",
"size": "2299",
"license": "apache-2.0",
"hash": 2336353709215379000,
"line_mean": 29.25,
"line_max": 77,
"alpha_frac": 0.708568943,
"autogenerated": false,
"ratio": 4.090747330960854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5799316273960854,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from sympy import Symbol
from sympy.parsing.sympy_parser import parse_expr
from vectortween.Animation import Animation
from vectortween.Tween import Tween
class ParametricAnimation(Animation):
"""
class to animate the value of a number between startframe and stopframe
tweening optionally can be applied (default is None, which means linear animation)
"""
def __init__(self, equation="t", tween=None, noise_fn=None):
"""
:param equation: parametric equation written as a string, expressed in terms of parameter "t".
:param tween: optional tweening specification
:param noise_fn: 2d function accepting a value ( equation(0) <= value <= equation(1)) and a time (0 <= t <= 1).
By accepting and using t, the noise is animated in time. By accepting but ignoring t, the noise is only spatial.
"""
if tween is None:
tween = ['linear']
if not equation:
equation = "t"
self.tween = tween
self.T = Tween(*tween)
self.noise_fn = noise_fn
self.equation = parse_expr(equation)
t = Symbol('t')
# Animation endpoints are the equation evaluated at t=0 and t=1.
frm = self.equation.evalf(subs={t: 0})
to = self.equation.evalf(subs={t: 1})
super().__init__(frm, to)
# The *_version methods below each return a NEW ParametricAnimation with a
# transformed equation and a correspondingly transformed noise function;
# the original animation is left untouched.
def delayed_version(self, delay):
t = Symbol("t")
new_equation = self.equation.subs(t, t - delay)
def new_noise_fn(value, t):
return self.noise_fn(value, t - delay)
return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
noise_fn=new_noise_fn if self.noise_fn else None)
def speedup_version(self, factor):
t = Symbol("t")
new_equation = self.equation.subs(t, t * factor)
def new_noise_fn(value, t):
return self.noise_fn(value, t * factor)
return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
noise_fn=new_noise_fn if self.noise_fn else None)
def translated_version(self, amount):
new_equation = self.equation + amount
def new_noise_fn(value, t):
return self.noise_fn(value + amount, t)
return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
noise_fn=new_noise_fn if self.noise_fn else None)
def scaled_version(self, amount):
new_equation = self.equation * amount
def new_noise_fn(value, t):
return self.noise_fn(value * amount, t)
return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
noise_fn=new_noise_fn if self.noise_fn else None)
def scaled_translate_version(self, scale, offset):
new_equation = self.equation * scale + offset
def new_noise_fn(value, t):
return self.noise_fn(value * scale + offset, t)
return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
noise_fn=new_noise_fn if self.noise_fn else None)
def timereversed_version(self):
t = Symbol("t")
new_equation = self.equation.subs(t, 1 - t)
def new_noise_fn(value, t):
return self.noise_fn(value, 1 - t)
return ParametricAnimation(equation="{}".format(new_equation), tween=self.tween,
noise_fn=new_noise_fn if self.noise_fn else None)
#@lru_cache(maxsize=1000)
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
animation happens between startframe and stopframe
the value is None before aliveframe, and after deathframe
* if aliveframe is not specified it defaults to startframe
* if deathframe is not specified it defaults to stopframe
initial value is held from aliveframe to startframe
final value is held from stopfrome to deathframe
"""
if birthframe is None:
birthframe = startframe
if deathframe is None:
deathframe = stopframe
if frame < birthframe:
return None
if frame > deathframe:
return None
if frame < startframe:
return self.frm
if frame > stopframe:
return self.to
parameter_value = self.T.tween2(frame, startframe, stopframe)
t = Symbol('t')
if self.noise_fn is not None:
if noiseframe is not None:
nf = noiseframe
else:
nf = parameter_value
# NOTE(review): noise_fn is documented above as (value, t), but the
# raw frame number is passed as its first argument here (compare
# NumberAnimation, which passes the computed value) — confirm intended.
noise_value = self.noise_fn(frame, nf)
else:
noise_value = 0
return self.equation.evalf(subs={t: parameter_value}) + noise_value
| {
"repo_name": "shimpe/pyvectortween",
"path": "vectortween/ParametricAnimation.py",
"copies": "1",
"size": "4846",
"license": "mit",
"hash": 6925188636088629000,
"line_mean": 36.2769230769,
"line_max": 121,
"alpha_frac": 0.5980189847,
"autogenerated": false,
"ratio": 3.988477366255144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027481088037357095,
"num_lines": 130
} |
from functools import lru_cache
from sympy import Symbol, pi, sin, cos
from sympy.parsing.sympy_parser import parse_expr
from vectortween.Animation import Animation
from vectortween.ParallelAnimation import ParallelAnimation
from vectortween.ParametricAnimation import ParametricAnimation
class PolarAnimation(Animation):
"""
animation of a 2d position (convenience class converting polar equation to two parametric equations)
"""
def __init__(self, equation="100*sin(5*theta)", offset=None, scale=None, tween=None, ytween=None, noise_fn=None,
y_noise_fn=None):
"""
:param equation: polar equation in the form r = f(theta)
:param offset: (x, y) offset added to the converted coordinates (defaults to [0, 0])
:param scale: (x, y) scale applied to the converted coordinates (defaults to [1, 1])
:param tween: tween method for the x coordinate (defaults to linear if not specified)
:param ytween: tween method for the y coordinate (defaults to same as that for x coordinate)
:param noise_fn: optional noise function (defaults to None) for the x coordinate. Maps (x,t) -> value
:param y_noise_fn: optional noise function (defaults to None) for the y coordinate. Maps (y,t) -> value
"""
super().__init__(None, None)
if scale is None:
scale = [1, 1]
if offset is None:
offset = [0, 0]
if ytween is None:
ytween = tween
self.tween = tween
self.ytween = ytween
self.equation = parse_expr(equation)
self.offset = offset
self.scale = scale
theta = Symbol("theta")
t = Symbol("t")
self.noise_fn = noise_fn
self.y_noise_fn = y_noise_fn
# Noise functions receive theta (= 2πt) as their time argument.
def new_noise_fn(value, t):
return self.noise_fn(value, 2 * pi * t)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value, 2 * pi * t)
# Map theta onto the animation parameter t in [0, 1]: one full revolution.
self.equation_timestretched = self.equation.subs(theta, 2 * pi * t)
# NOTE(review): frm mixes the x formula (sin) at t=0 and to mixes the y
# formula (cos) at t=1, rather than both being (x, y) pairs — confirm.
self.frm = (scale[0] * self.equation_timestretched * sin(2 * pi * t) + offset[0]).evalf(subs={t: 0})
self.to = (scale[1] * self.equation_timestretched * cos(2 * pi * t) + offset[1]).evalf(subs={t: 1})
# Polar to Cartesian: x = r·sin, y = r·cos, each its own ParametricAnimation.
self.anim = ParallelAnimation([ParametricAnimation(equation="{}".format(
scale[0] * self.equation_timestretched * sin(2 * pi * t) + offset[0]),
tween=tween, noise_fn=new_noise_fn if self.noise_fn else None),
ParametricAnimation(equation="{}".format(
scale[1] * self.equation_timestretched * cos(2 * pi * t) + offset[1]),
tween=ytween, noise_fn=new_y_noise_fn if self.y_noise_fn else None)])
# The *_version methods below return NEW PolarAnimations with transformed
# noise functions and offsets/scales; the original is left untouched.
# NOTE(review): self.equation is expressed in theta, so .subs(t, ...) in
# delayed/speedup/timereversed_version appears to be a no-op on the
# equation itself (only the noise functions are transformed) — confirm.
def delayed_version(self, delay):
t = Symbol("t")
new_equation = self.equation.subs(t, t - delay)
def new_noise_fn(value, t):
return self.noise_fn(value, t - delay)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value, t - delay)
return PolarAnimation(equation="{}".format(new_equation),
offset=self.offset, scale=self.scale, tween=self.tween, ytween=self.ytween,
noise_fn=new_noise_fn if self.noise_fn else None,
y_noise_fn=new_y_noise_fn if self.y_noise_fn else None)
def speedup_version(self, factor):
t = Symbol("t")
new_equation = self.equation.subs(t, t * factor)
def new_noise_fn(value, t):
return self.noise_fn(value, t * factor)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value, t * factor)
return PolarAnimation(equation="{}".format(new_equation),
offset=self.offset, scale=self.scale, tween=self.tween, ytween=self.ytween,
noise_fn=new_noise_fn if self.noise_fn else None,
y_noise_fn=new_y_noise_fn if self.y_noise_fn else None)
def translated_version(self, offset):
new_equation = self.equation
new_offset = [self.offset[0] + offset[0], self.offset[1] + offset[1]]
def new_noise_fn(value, t):
return self.noise_fn(value + offset[0], t)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value + offset[1], t)
return PolarAnimation(equation="{}".format(new_equation),
offset=new_offset, scale=self.scale, tween=self.tween, ytween=self.ytween,
noise_fn=new_noise_fn if self.noise_fn else None,
y_noise_fn=new_y_noise_fn if self.y_noise_fn else None)
def scaled_version(self, scale):
new_equation = self.equation
new_scale = [self.scale[0] * scale[0], self.scale[1] * scale[1]]
def new_noise_fn(value, t):
return self.noise_fn(value * scale[0], t)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value * scale[1], t)
return PolarAnimation(equation="{}".format(new_equation),
offset=self.offset, scale=new_scale, tween=self.tween, ytween=self.ytween,
noise_fn=new_noise_fn if self.noise_fn else None,
y_noise_fn=new_y_noise_fn if self.y_noise_fn else None)
def scaled_translate_version(self, scale, offset):
new_equation = self.equation
new_offset = [self.offset[0] + offset[0], self.offset[1] + offset[1]]
new_scale = [self.scale[0] * scale[0], self.scale[1] * scale[1]]
def new_noise_fn(value, t):
return self.noise_fn(value * scale[0] + offset[0], t)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value * scale[1] + offset[1], t)
return PolarAnimation(equation="{}".format(new_equation),
offset=new_offset, scale=new_scale, tween=self.tween, ytween=self.ytween,
noise_fn=new_noise_fn if self.noise_fn else None,
y_noise_fn=new_y_noise_fn if self.y_noise_fn else None)
def timereversed_version(self):
t = Symbol("t")
new_equation = self.equation.subs(t, 1 - t)
def new_noise_fn(value, t):
return self.noise_fn(value, 1 - t)
def new_y_noise_fn(value, t):
return self.y_noise_fn(value, 1 - t)
return PolarAnimation(equation="{}".format(new_equation),
offset=self.offset, scale=self.scale, tween=self.tween, ytween=self.ytween,
noise_fn=new_noise_fn if self.noise_fn else None,
y_noise_fn=new_y_noise_fn if self.y_noise_fn else None)
#@lru_cache(maxsize=1000)
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
:param frame: current frame
:param birthframe: frame where this animation starts returning something other than None
:param startframe: frame where animation starts to evolve
:param stopframe: frame where animation is completed
:param deathframe: frame where animation starts to return None
:return: an (x, y) position, or None outside [birthframe, deathframe]
"""
return self.anim.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
| {
"repo_name": "shimpe/pyvectortween",
"path": "vectortween/PolarAnimation.py",
"copies": "1",
"size": "7189",
"license": "mit",
"hash": -696790920484641500,
"line_mean": 43.93125,
"line_max": 116,
"alpha_frac": 0.5868688274,
"autogenerated": false,
"ratio": 3.5695134061569016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46563822335569016,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from vectortween.Animation import Animation
from vectortween.Mapping import Mapping
from vectortween.Tween import Tween
class NumberAnimation(Animation):
    """
    Animate a numeric value between startframe and stopframe, with optional
    tweening (default: linear) and an optional additive noise function.
    """

    def __init__(self, frm, to, tween=None, noise_fn=None):
        """
        :param frm: start value
        :param to: end value
        :param tween: optional tweening function (default: linear)
        :param noise_fn: optional noise function (default: None)
        noise_fn needs to accept two parameters: a value (frm <= value <= to) and a time (0 <= time <= 1)
        if the noise_fn uses parameter t the noise will be animated in time; by accepting but ignoring t,
        the noise is only spatial
        """
        super().__init__(frm, to)
        self.noise_fn = noise_fn
        self.T = Tween(*(tween if tween is not None else ['linear']))

    #@lru_cache(maxsize=1000)
    def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
        """
        Animation happens between startframe and stopframe.
        The value is None before birthframe and after deathframe;
        birthframe defaults to startframe, deathframe to stopframe.
        The initial value is held from birthframe to startframe and the
        final value from stopframe to deathframe.
        """
        first_visible = startframe if birthframe is None else birthframe
        last_visible = stopframe if deathframe is None else deathframe
        if frame < first_visible or frame > last_visible:
            return None
        if frame < startframe:
            return self.frm
        if frame > stopframe:
            return self.to
        # Tween maps the frame to a progress value in [0, 1]; linlin maps
        # that progress onto the [frm, to] range.
        progress = self.T.tween2(frame, startframe, stopframe)
        value = Mapping.linlin(progress, 0, 1, self.frm, self.to)
        noise = 0
        if self.noise_fn is not None:
            noise = self.noise_fn(value, progress if noiseframe is None else noiseframe)
        return value + noise
| {
"repo_name": "shimpe/pyvectortween",
"path": "vectortween/NumberAnimation.py",
"copies": "1",
"size": "2356",
"license": "mit",
"hash": -6466285005523409000,
"line_mean": 33.6470588235,
"line_max": 107,
"alpha_frac": 0.6056876061,
"autogenerated": false,
"ratio": 4.184724689165186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5290412295265187,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
from vectortween.Animation import Animation
from vectortween.NumberAnimation import NumberAnimation
from vectortween.Tween import Tween
class PointAnimation(Animation):
"""
animation of a 2d position (convenience class composing two number animations)
"""
def __init__(self, frm, to, tween=None, ytween=None, noise_fn=None, y_noise_fn=None, xy_noise_fn=None):
"""
:param frm: a list/tuple containing an (x, y) number (starting point; floats)
:param to: a list/tuple containing an (x, y) number (end point; floats)
:param tween: tween method for the x coordinate (defaults to linear if not specified)
:param ytween: tween method for the y coordinate (defaults to same as that for x coordinate)
:param noise_fn: optional noise function for x,t coordinates, returning single value
:param y_noise_fn: optional noise function for y,t coordinates, returning single value
:param xy_noise_fn: optional noise function for x,y,t coordinates, returning two values
"""
super().__init__(frm, to)
if ytween is None:
ytween = tween
self.xy_noise_fn = xy_noise_fn
# x and y are animated independently; per-axis noise lives inside them.
self.anim_x = NumberAnimation(self.frm[0], self.to[0], tween, noise_fn)
self.anim_y = NumberAnimation(self.frm[1], self.to[1], ytween, y_noise_fn)
#@lru_cache(maxsize=1000)
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
:param frame: current frame
:param birthframe: frame where this animation starts returning something other than None
:param startframe: frame where animation starts to evolve
:param stopframe: frame where animation is completed
:param deathframe: frame where animation starts to return None
:return: an (x, y) tuple (components are None outside [birthframe, deathframe])
"""
newx = self.anim_x.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
newy = self.anim_y.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe)
if self.xy_noise_fn is not None:
if noiseframe is not None:
t = noiseframe
else:
# NOTE(review): Tween.tween2 is called on the class without an
# instance here; elsewhere it is always self.T.tween2(...) — confirm
# tween2 is usable as a static/unbound call.
t = Tween.tween2(frame, startframe, stopframe)
addx, addy = self.xy_noise_fn(newx, newy, t)
else:
addx, addy = 0, 0
return newx + addx, newy + addy
| {
"repo_name": "shimpe/pyvectortween",
"path": "vectortween/PointAnimation.py",
"copies": "1",
"size": "2417",
"license": "mit",
"hash": -9050388031006177000,
"line_mean": 45.4807692308,
"line_max": 107,
"alpha_frac": 0.6578402979,
"autogenerated": false,
"ratio": 3.7472868217054263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9879709393548137,
"avg_score": 0.0050835452114577715,
"num_lines": 52
} |
from functools import lru_cache
from vectortween.Animation import Animation
from vectortween.NumberAnimation import NumberAnimation
class ColorAnimation(Animation):
    """
    Animate a color between two (r, g, b) or (r, g, b, a) tuples, where each
    component c satisfies 0 <= c <= 1.
    """

    def __init__(self, frm, to, tween=None, tweengreen=None, tweenblue=None, tweenalpha=None):
        """
        :param frm: start color, e.g. (1, 0, 0) for red (optionally also specify an alpha component)
        :param to: stop color, e.g. (0, 1, 0) for green (optionally also specify an alpha component)
        :param tween: tween method for red color component (defaults to linear if not specified)
        :param tweengreen: tween method for green color component (defaults to same as red if not specified)
        :param tweenblue: tween method for blue color component (defaults to same as red if not specified)
        :param tweenalpha: tween method for alpha color component (defaults to linear if not specified)
        Note: output will contain alpha if input contains alpha
        """
        super().__init__(frm, to)
        # EAFP: probe frm for a fourth (alpha) component.
        try:
            frm[3]
            self.use_alpha = True
        except IndexError:
            self.use_alpha = False
        if tweenalpha is None:
            tweenalpha = ['linear']
        self.anim_red = NumberAnimation(self.frm[0], self.to[0], tween)
        self.anim_green = NumberAnimation(self.frm[1], self.to[1], tweengreen)
        self.anim_blue = NumberAnimation(self.frm[2], self.to[2], tweenblue)
        # Constant opaque alpha when the input colors carry no alpha component.
        if self.use_alpha:
            self.anim_alpha = NumberAnimation(self.frm[3], self.to[3], tweenalpha)
        else:
            self.anim_alpha = NumberAnimation(1, 1, tweenalpha)

    @staticmethod
    def __clip(val, minimum, maximum):
        """
        Clamp val into [minimum, maximum]; any None input propagates as None.
        :param val: input value
        :param minimum: min value
        :param maximum: max value
        :return: val clipped to range [minimum, maximum], or None
        """
        if val is None or minimum is None or maximum is None:
            return None
        if val < minimum:
            return minimum
        if val > maximum:
            return maximum
        return val

    #@lru_cache(maxsize=1000)
    def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
        """
        :param frame: current frame
        :param birthframe: frame where animation starts to return something other than None
        :param startframe: frame where animation starts to evolve
        :param stopframe: frame where animation stops evolving
        :param deathframe: frame where animation starts to return None
        :return: an (r, g, b) tuple, or (r, g, b, a) when the input colors had alpha
        """
        # Bug fix: the branches were inverted — the 4-tuple including alpha
        # was returned when use_alpha was False, and colors WITH alpha lost
        # it. Now the output contains alpha iff the input did, matching the
        # documented contract in __init__.
        r = self.__clip(self.anim_red.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe), 0, 1)
        g = self.__clip(self.anim_green.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe), 0, 1)
        b = self.__clip(self.anim_blue.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe), 0, 1)
        if self.use_alpha:
            a = self.__clip(self.anim_alpha.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe), 0, 1)
            return (r, g, b, a)
        return (r, g, b)
| {
"repo_name": "shimpe/pyvectortween",
"path": "vectortween/ColorAnimation.py",
"copies": "1",
"size": "3700",
"license": "mit",
"hash": 1762736590989704000,
"line_mean": 45.835443038,
"line_max": 132,
"alpha_frac": 0.6364864865,
"autogenerated": false,
"ratio": 3.9278131634819533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5064299649981954,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import hashlib
# Puzzle input salt (AoC 2016 day 14); the commented value is the example salt.
salt = 'yjdafjpo'
#salt = 'abc'
# NOTE(review): `hashes` appears unused in this script — caching is done via
# lru_cache on the functions below instead.
hashes = {}
@lru_cache(None)
def first_with_three(s):
    """Return the first character of *s* that occurs three times in a row, else None."""
    run_char = None
    run_len = 0
    for ch in s:
        if ch == run_char:
            run_len += 1
        else:
            run_char = ch
            run_len = 1
        if run_len == 3:
            return ch
    return None
@lru_cache(None)
def seq_count(s):
    """Map each character of *s* to the length of its longest consecutive run.

    Quirk preserved from the original: an empty string yields {None: 1}.
    """
    longest = {}
    current = None
    run = 1
    for ch in s:
        if current is not None and current != ch:
            # Run ended: record it if it beats the best run seen so far.
            if longest.get(current, 0) < run:
                longest[current] = run
            run = 1
        elif current is not None:
            run += 1
        current = ch
    # Flush the final run (or the {None: 1} entry for an empty string).
    if longest.get(current, 0) < run:
        longest[current] = run
    return longest
@lru_cache(None)
def hash(s, i):
    """Return the key-stretched MD5 hex digest of s + str(i).

    Part 2 stretching: 2017 successive MD5 applications (the initial hash
    plus 2016 re-hashes). For part 1 a single MD5 of s + str(i) was used.
    """
    digest = s + str(i)
    for _ in range(2017):
        digest = hashlib.md5(digest.encode('ascii')).hexdigest()
    return digest
# Main search: scan indices until enough "keys" are found. An index is a key
# when its stretched hash has a char tripled AND that same char appears five
# in a row in one of the next 1000 hashes.
idx = 0
keys = 0
found = False
while not found:
h = hash(salt, idx)
t = first_with_three(h)
if t:
for x in range(idx+1, idx+1001):
h_2 = hash(salt, x)
cnts = seq_count(h_2)
# cnts[t] > 4 means a run of at least five of the triple's char.
if t in cnts and cnts[t] > 4:
keys += 1
print("Found key " + str(keys) + " at index " + str(idx))
# NOTE(review): the puzzle asks for the 64th key; 72 here is
# presumably a deliberate tweak by the author — confirm.
if keys == 72:
found = True
# NOTE(review): found_key is assigned but never read.
found_key = True
break
idx += 1 | {
"repo_name": "matslindh/codingchallenges",
"path": "adventofcode2016/14.py",
"copies": "1",
"size": "1448",
"license": "mit",
"hash": -9010889376516700000,
"line_mean": 15.6551724138,
"line_max": 73,
"alpha_frac": 0.4613259669,
"autogenerated": false,
"ratio": 3.196467991169978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41577939580699774,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import jsonref
from ocdsmerge.util import get_release_schema_url, get_tags
def get_merge_rules(schema=None):
    """
    Returns merge rules as key-value pairs, in which the key is a JSON path as a tuple, and the value is a list of
    merge properties whose values are `true`.

    ``schema`` may be a dereferenceable dict, a URL, a file path, or falsy (in
    which case the latest tagged release schema is used).
    """
    schema = schema or get_release_schema_url(get_tags()[-1])
    if isinstance(schema, dict):
        dereferenced = jsonref.JsonRef.replace_refs(schema)
        return _get_merge_rules_from_dereferenced_schema(dereferenced)
    return _get_merge_rules_from_url_or_path(schema)
@lru_cache()
def _get_merge_rules_from_url_or_path(schema):
    """Load and dereference a schema from a URL or a file path, then extract its merge rules."""
    if schema.startswith('http'):
        dereferenced = jsonref.load_uri(schema)
        return _get_merge_rules_from_dereferenced_schema(dereferenced)
    with open(schema) as handle:
        dereferenced = jsonref.load(handle)
    return _get_merge_rules_from_dereferenced_schema(dereferenced)
def _get_merge_rules_from_dereferenced_schema(deref_schema):
    """Collect the merge rules of an already-dereferenced schema into a dict."""
    rules = _get_merge_rules(deref_schema['properties'])
    return dict(rules)
def _get_merge_rules(properties, path=None):
    """
    Yields merge rules as key-value pairs, in which the first element is a JSON path as a tuple, and the second element
    is a set of merge properties whose values are `true`.
    """
    path = path or ()

    for name, subschema in properties.items():
        child = path + (name,)
        declared_types = _get_types(subschema)

        # `omitWhenMerged` supersedes all other rules.
        # See https://standard.open-contracting.org/1.1/en/schema/merging/#discarded-fields
        if subschema.get('omitWhenMerged') or subschema.get('mergeStrategy') == 'ocdsOmit':
            yield child, {'omitWhenMerged'}
        # `wholeListMerge` supersedes any nested rules.
        # See https://standard.open-contracting.org/1.1/en/schema/merging/#whole-list-merge
        elif 'array' in declared_types and (subschema.get('wholeListMerge')
                                            or subschema.get('mergeStrategy') == 'ocdsVersion'):
            yield child, {'wholeListMerge'}
        # See https://standard.open-contracting.org/1.1/en/schema/merging/#object-values
        elif 'object' in declared_types and 'properties' in subschema:
            yield from _get_merge_rules(subschema['properties'], path=child)
        # See https://standard.open-contracting.org/1.1/en/schema/merging/#whole-list-merge
        elif 'array' in declared_types and 'items' in subschema:
            item_types = _get_types(subschema['items'])
            if any(item_type != 'object' for item_type in item_types):
                # Any non-object item type forces a whole-list merge.
                yield child, {'wholeListMerge'}
            elif 'object' in item_types and 'properties' in subschema['items']:
                if 'id' in subschema['items']['properties']:
                    yield from _get_merge_rules(subschema['items']['properties'], path=child)
                else:
                    # Arrays of objects without an `id` cannot be merged item-wise.
                    yield child, {'wholeListMerge'}
def _get_types(prop):
"""
Returns a property's `type` as a list.
"""
if 'type' not in prop:
return []
if isinstance(prop['type'], str):
return [prop['type']]
return prop['type']
| {
"repo_name": "open-contracting/ocds-merge",
"path": "ocdsmerge/rules.py",
"copies": "1",
"size": "3045",
"license": "bsd-3-clause",
"hash": -657399837298094800,
"line_mean": 38.5454545455,
"line_max": 119,
"alpha_frac": 0.6357963875,
"autogenerated": false,
"ratio": 3.6078199052132702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.474361629271327,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import logging
from typing import Dict
import spot
from config import LTL3BA_PATH
from helpers.python_ext import is_empty_str
from helpers.shell import execute_shell
from interfaces.LTL_to_automaton import LTLToAutomaton
from interfaces.automaton import Automaton
from interfaces.expr import Expr, Signal
from LTL_to_atm.ast_to_ltl3ba import ConverterToLtl2BaFormatVisitor
from LTL_to_atm.spot_atm_converter import spotAtm_to_automaton
from parsing.weak_until_converter import WeakToUntilConverterVisitor
class LTLToAtmViaLTL3BA(LTLToAutomaton):
    """LTL-to-automaton translator that shells out to the ltl3ba tool and
    parses its HOA output via SPOT."""

    # ltl3ba invocation prefix: -H3 selects HOA output, -f takes a formula string.
    _execute_cmd = LTL3BA_PATH + ' -H3 -f'

    @staticmethod
    @lru_cache()
    def convert(expr:Expr, states_prefix:str='', timeout:int=None) -> Automaton:
        """Convert an LTL expression into an Automaton (results are cached per argument tuple)."""
        expr = WeakToUntilConverterVisitor().dispatch(expr)  # ltl3ba 1.1.2 does not support W
        format_converter = ConverterToLtl2BaFormatVisitor()
        property_in_ltl2ba_format = format_converter.dispatch(expr)
        logging.debug('Ltl2UCW: converting:\n' + property_in_ltl2ba_format)
        return LTLToAtmViaLTL3BA._convert_raw(property_in_ltl2ba_format,
                                              format_converter.signal_by_name,
                                              states_prefix,
                                              timeout)

    @staticmethod
    def _convert_raw(property_:str, signal_by_name:Dict[str, Signal], states_prefix, timeout=None) -> Automaton:
        """ :param property_: in the LTL2BA format (we do NOT negate it!) """
        # Run ltl3ba; rc/err are asserted rather than handled — a bad formula aborts.
        rc, ba, err = execute_shell('{0} "{1}"'.format(LTLToAtmViaLTL3BA._execute_cmd, property_),
                                    timeout=timeout)
        assert rc == 0, str(rc) + ', err: ' + str(err) + ', out: ' + str(ba)
        assert is_empty_str(err), err
        logging.debug(ba)
        aut = spot.automaton(ba + '\n')  # type: spot.twa
        # (when SPOT sees `\n` it treats input as the string)
        atm = spotAtm_to_automaton(aut, states_prefix, signal_by_name, property_)
        atm.name = property_
        return atm
| {
"repo_name": "5nizza/party-elli",
"path": "LTL_to_atm/translator_via_ltl3ba.py",
"copies": "1",
"size": "2069",
"license": "mit",
"hash": -1540349401247320000,
"line_mean": 39.568627451,
"line_max": 112,
"alpha_frac": 0.6457225713,
"autogenerated": false,
"ratio": 3.4085667215815487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9538796459216448,
"avg_score": 0.0030985667330202626,
"num_lines": 51
} |
from functools import lru_cache
import math
from scipy import optimize, stats, integrate
import numpy as np
import quaternion # adds to numpy # noqa # pylint: disable=unused-import
import cv2
from scipy.optimize import leastsq
from visnav.settings import *
class ImageProc:
    """Collection of static image-processing helpers built on OpenCV/NumPy:
    noise injection, cropping, brightness/gamma adjustment, kernels, and
    histogram-based thresholding."""

    # Result of the most recent optimize.leastsq run in optimal_threshold.
    latest_opt = None
    show_fit = None

    @staticmethod
    def add_noise_to_image(image, noise_img_file):
        """Overlay the alpha channel of a noise image, resized to match `image`."""
        tmp = cv2.imread(noise_img_file, cv2.IMREAD_UNCHANGED)
        noise_img = cv2.resize(tmp, None,
                               fx=image.shape[1] / tmp.shape[1],
                               fy=image.shape[0] / tmp.shape[0],
                               interpolation=cv2.INTER_CUBIC)
        return cv2.add(image, noise_img[:, :, 3])

    @staticmethod
    def crop_and_zoom_image(image, x_off, y_off, width, height, scale, trg_w_h=None, others=tuple()):
        """Crop a window from `image` (and any `others`), rescale it, and pad
        it into an array of the target size.

        NOTE(review): `tw, th = trg_w_h` executes before the
        `trg_w_h is not None` check below, so calling with the default
        trg_w_h=None raises TypeError; callers apparently always pass a
        target size — confirm before relying on the default.
        """
        tw, th = trg_w_h
        if scale is None:
            scale = min(th / height, tw / width)
        res = []
        for img in [image] + list(others):
            imgc = cv2.resize(img[y_off:y_off + height, x_off:x_off + width], None, fx=scale, fy=scale,
                              interpolation=cv2.INTER_AREA)
            # assumes 2-D (grayscale) arrays; unpacking fails on color images
            oh, ow = img.shape
            ch, cw = imgc.shape
            if trg_w_h is not None:
                # align the crop with whichever image border it touches, else center it
                if x_off + width >= ow:
                    x0 = tw - cw
                elif x_off <= 0:
                    x0 = 0
                else:
                    x0 = (tw - cw) // 2
                if y_off + height >= oh:
                    y0 = th - ch
                elif y_off <= 0:
                    y0 = 0
                else:
                    y0 = (th - ch) // 2
                imgd = np.zeros((th, tw), dtype=img.dtype)
                imgd[y0:y0 + ch, x0:x0 + cw] = imgc
            else:
                imgd = imgc
            res.append(imgd)
        if len(others) > 0:
            return res
        return res[0]

    @staticmethod
    def single_object_bounds(img, threshold, crop_marg, min_px, debug=False):
        """Return the (x, y, w, h) bounding box (with margin) of the single
        bright object in `img`, or (None, None, None, None) if too few pixels
        exceed the threshold."""
        # binary image
        _, mask = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
        # remove stars
        mask = cv2.erode(mask, ImageProc.bsphkern(9), iterations=1)
        if np.sum(mask) < min_px:
            return (None,) * 4
        # detect target
        x_, y_, w_, h_ = cv2.boundingRect(mask)
        # add margin
        x, y = max(0, x_ - crop_marg), max(0, y_ - crop_marg)
        w = min(mask.shape[1] - x, w_ + 2 * crop_marg - (x - x_ + crop_marg))
        h = min(mask.shape[0] - y, h_ + 2 * crop_marg - (y - y_ + crop_marg))
        if debug:
            img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            img_color = cv2.rectangle(img_color, (x, y), (x + w, y + h), (0, 0, 255), thickness=1)
            img_color[y + h // 2, x + w // 2] = (0, 0, 255)
            cv2.imshow('box', cv2.resize(img_color, (512, 512)))
        return x, y, w, h

    @staticmethod
    def equalize_brightness(image, ref_image, percentile=98, image_gamma=1):
        """Scale `image` so that its `percentile` brightness matches that of `ref_image`."""
        image = ImageProc.adjust_gamma(image, 1 / image_gamma)
        ip = np.percentile(image, percentile)
        rp = np.percentile(ImageProc.adjust_gamma(ref_image, 1 / image_gamma), percentile)
        image = cv2.convertScaleAbs(image, None, rp / ip, 0)
        return ImageProc.adjust_gamma(image, image_gamma)

    @staticmethod
    def normalize_brightness(image, quantiles=(0.0005, 0.9999), top_margin=1.2, src_gamma=1.0, gamma=1.0):
        """Stretch intensities between the given quantiles (uint8 or float images)."""
        image = ImageProc.adjust_gamma(image, src_gamma, inverse=True)
        bot_v, top_v = np.quantile(image, quantiles)
        top_v = top_v * top_margin
        if image.dtype == np.uint8:
            sc = 255 / (top_v - bot_v)
            image = cv2.convertScaleAbs(image, None, sc, -bot_v * sc)
        elif image.dtype in (float, np.float32):
            sc = 1 / (top_v - bot_v)
            image = np.clip((image - bot_v) * sc, 0, 1)
        else:
            assert False, 'unsupported image dtype: %s' % image.dtype
        return ImageProc.adjust_gamma(image, gamma)

    @staticmethod
    def default_preprocess(image, max=255):
        """Subtract an estimated background level, rescale, and apply a 1.8 gamma.

        NOTE(review): parameter `max` shadows the builtin within this method.
        """
        # background level estimated as the ~24.4th percentile (250/1024)
        bg = np.percentile(image, 250 / 1024 * 100)
        return ImageProc.adjust_gamma(np.clip((image - bg) * max / (max - bg), 0, max), 1.8)

    @staticmethod
    def change_color_depth(img, src_bits, dst_bits):
        """Rescale integer pixel values from src_bits to dst_bits of color depth."""
        if src_bits == dst_bits:
            return img
        if str(img.dtype)[:4] == 'uint':
            new_type = 'uint' + str(math.ceil(dst_bits / 8) * 8)
        else:
            new_type = img.dtype
        # widen before scaling up; narrow after scaling down
        if src_bits < dst_bits:
            img = img.astype(new_type)
        img = img * (2 ** (dst_bits - src_bits))
        if src_bits > dst_bits:
            img = img.astype(new_type)
        return img

    @staticmethod
    def remove_bg(img, bg_img, gain=None, max_val=None, offset=0):
        """Subtract a (possibly gain-adjusted) background image from `img`."""
        if gain is None:
            # estimate correct gain
            cost_fun = lambda g: np.var((img - g[0] * bg_img).reshape((-1, 3)), axis=0)
            x, _ = leastsq(cost_fun, np.array([1]))
            gain = x[0]
            print('estimated bg gain: %f' % gain)
        imgr = img.astype('float') - gain * bg_img
        if offset not in (None, False):
            imgr += offset - np.min(imgr)
        if max_val and offset is not False:
            return np.clip(imgr, 0, max_val).astype(img.dtype)
        return imgr

    @staticmethod
    def color_correct(img, bgr_mx, inverse=False, max_val=None):
        """Apply (or invert) a 3x3 BGR color-correction matrix to `img`."""
        assert img.shape[2] == 3, 'can only do to BGR images'
        if inverse:
            bgr_mx = np.linalg.inv(bgr_mx)
        imgc = bgr_mx.dot(img.reshape((-1, 3)).T).T.reshape(img.shape)
        if max_val:
            return np.clip(imgc, 0, max_val).astype(img.dtype)
        return imgc

    @staticmethod
    def adjust_gamma(image, gamma, gamma_break=None, linear_part=True, inverse=False, max_val=255):
        """Gamma-correct `image`, optionally with a linear segment below `gamma_break`."""
        if gamma == 1:
            return image
        # build a lookup table mapping the pixel values [0, 255] to
        # their adjusted gamma values
        invGamma = gamma if inverse else 1.0 / gamma
        gamma_break = gamma_break or 0
        if image.dtype == 'uint8' and gamma_break == 0:
            # apply gamma correction using the lookup table
            max_val = min(max_val, 255)
            table = np.array([((i / max_val) ** invGamma) * max_val for i in np.arange(0, max_val + 1)]).astype(
                image.dtype)
            adj_img = cv2.LUT(image, table)
        elif gamma_break == 0:
            adj_img = np.round(((image / max_val) ** invGamma) * max_val).astype(image.dtype)
        elif True:
            # NOTE(review): this branch always wins; the `else` below is dead code.
            # from https://se.mathworks.com/help/vision/ref/gammacorrection.html
            b_p = gamma_break
            s_ls = 1 / (gamma / b_p ** (1 / gamma - 1) - gamma * gamma_break + gamma_break)
            f_s = gamma * s_ls / b_p ** (1 / gamma - 1)
            c_o = f_s * b_p ** (1 / gamma) - s_ls * b_p
            img = image.flatten() / max_val
            I = img <= (s_ls if inverse else 1) * b_p
            nI = np.logical_not(I)
            adj_img = np.zeros(image.shape).flatten()
            adj_img[I] = (img[I] / s_ls) if inverse else (img[I] * s_ls)
            adj_img[nI] = (((img[nI] + c_o) / f_s) ** gamma) if inverse else (f_s * img[nI] ** (1 / gamma) - c_o)
            adj_img = (adj_img * max_val).reshape(image.shape).astype(image.dtype)
        else:
            # from https://en.wikipedia.org/wiki/SRGB
            if 1:
                a = gamma_break
                K0 = a / (gamma - 1)
            else:
                K0 = gamma_break
                a = K0 * (gamma - 1)
            alpha = 1 + a
            th = alpha ** gamma * (gamma - 1) ** (gamma - 1) / a ** (gamma - 1) / gamma ** gamma
            lim = K0 if inverse else K0 / th
            img = image.flatten() / max_val
            I = img <= lim
            nI = np.logical_not(I)
            adj_img = np.zeros(image.shape).flatten()
            adj_img[I] = (img[I] / th) if inverse else (th * img[I])
            adj_img[nI] = (((img[nI] + a) / alpha) ** gamma) if inverse else (alpha * img[nI] ** (1 / gamma) - a)
            adj_img = (adj_img * max_val).reshape(image.shape).astype(image.dtype)
        # adj_img = np.round(adj_img * max_val).reshape(image.shape).astype(image.dtype)
        return adj_img

    @staticmethod
    def apply_point_spread_fn(img, ratio):
        """Blur `img` in place with a Gaussian point-spread kernel."""
        # ratio is how many % of power on central pixel
        sd = 1 / math.sqrt(2 * math.pi * ratio)
        size = 1 + 2 * math.ceil(sd * 2)
        kernel = ImageProc.gkern2d(size, sd)
        cv2.filter2D(img, -1, kernel, img)
        return img

    @staticmethod
    @lru_cache(maxsize=5)
    def gkern2d(l=5, sig=1.):
        """
        creates gaussian kernel with side length l and a sigma of sig
        (l and sig may each be a scalar or a (w, h)/(sx, sy) pair)
        """
        w, h = (l[0], l[1]) if '__iter__' in dir(l) else (l, l)
        sx, sy = (sig[0], sig[1]) if '__iter__' in dir(sig) else (sig, sig)
        ax = np.arange(-w // 2 + 1., w // 2 + 1.)
        ay = np.arange(-h // 2 + 1., h // 2 + 1.)
        xx, yy = np.meshgrid(ax, ay)
        kernel = np.exp(-((xx / sx) ** 2 + (yy / sy) ** 2) / 2)
        return kernel / np.sum(kernel)

    @staticmethod
    def bsphkern(l=5):
        """
        creates a binary spherical kernel
        """
        gkern = ImageProc.gkern2d(l=l, sig=l)
        # threshold slightly below the edge midpoint value of the Gaussian
        limit = gkern[l // 2 if isinstance(l, int) else l[1] // 2, -1] * 0.995
        return np.array(gkern >= limit, dtype=np.uint8)

    @staticmethod
    def fuzzy_kernel(kernel, sig):
        """Zero-pad `kernel` and soften it by convolving with a Gaussian of sigma sig/2."""
        w = int(sig // 2)
        skernel = np.zeros(tuple(np.array(kernel.shape[:2]) + int(sig)) + kernel.shape[2:3], dtype=kernel.dtype)
        skernel[w:w + kernel.shape[0], w:w + kernel.shape[1]] = kernel
        gkrn = ImageProc.gkern2d(sig, sig / 2)
        skernel = cv2.filter2D(skernel, kernel.shape[2], gkrn)
        return skernel

    @staticmethod
    def _img_max_valid(img):
        """Return the nominal max pixel value: 1.0 for float images, 255 for uint8."""
        max = 1.0 if 'float' in str(img.dtype) else 255
        assert max != 255 or img.dtype == np.uint8, 'wrong datatype for image: %s' % img.dtype
        return max

    @staticmethod
    def add_stars(img, mask, coef=2, cache=False):
        """Write power-law distributed star intensities into `img` where `mask` is set."""
        # add power law distributed stars to image
        # (shape == shape[:2] holds only for 2-D arrays, i.e. grayscale)
        assert img.shape == img.shape[:2], 'works only with grayscale images'
        if not cache:
            ImageProc._cached_random_stars.cache_clear()
        stars = ImageProc._cached_random_stars(coef, img.shape)
        # can be over 255, will clip later
        img[mask] = np.clip(stars[mask], 0, 600)
        return img

    @staticmethod
    @lru_cache(maxsize=1)
    def _cached_random_stars(coef, shape):
        # single-entry cache; cleared by add_stars when cache=False
        return np.random.pareto(coef, shape)

    @staticmethod
    def add_sensor_noise(img, mean=7, sd=2, cache=False):
        """Add (optionally cached) Gaussian sensor noise to `img` in place."""
        if not cache:
            ImageProc._cached_sensor_noise.cache_clear()
        img += ImageProc._cached_sensor_noise(mean, sd, img.shape)
        return img

    @staticmethod
    @lru_cache(maxsize=1)
    def _cached_sensor_noise(mean, sd, shape):
        # single-entry cache; cleared by add_sensor_noise when cache=False
        return np.random.normal(mean, sd, shape)

    @staticmethod
    def process_target_image(image_src):
        """Threshold `image_src` to zero below a (currently hard-coded) value;
        returns (thresholded image, histogram, threshold used)."""
        hist = cv2.calcHist([image_src], [0], None, [256], [0, 256])
        if False:
            # histogram-fit based threshold, currently disabled
            threshold_value = ImageProc.optimal_threshold(hist)
        else:
            threshold_value = 50
        th, image_dst = cv2.threshold(image_src, threshold_value, 255, cv2.THRESH_TOZERO)
        return image_dst, hist, threshold_value

    @staticmethod
    def optimal_threshold(hist, image=None):
        """Estimate a background/foreground threshold by fitting a gamma +
        normal mixture to the log-histogram."""
        if hist is None:
            hist = cv2.calcHist([image], [0], None, [256], [0, 256])
        tot_px = 256  # sum(hist) -- for some reason get error if divide with pixel count
        x = list(range(1, len(hist) + 1))
        loghist = np.array(list(map(lambda x: math.log(x + 1) / tot_px, hist)))

        # background model: scaled gamma pdf
        def fitfun1(p, x):
            return stats.gamma.pdf(x, p[0], loc=0, scale=p[1]) * p[2]

        # foreground model: scaled normal pdf
        def fitfun2(p, x):
            return stats.norm.pdf(x, p[0], p[1]) * p[2]

        def fitfun(p, x):
            return fitfun1(p[:3], x) + fitfun2(p[3:], x)

        def errfun(p, x, y):
            tmp = y - fitfun(p, x)
            # assert False, 'p:%s, x:%s, y:%s, ffval:%s'%(p, x[0:50], y[0:50], fitfun(p, x[0:50]))
            return tmp

        shape = 1.5
        init = [
            shape, np.argmax(loghist) / (shape - 1), 1,  # for fitfun1
            127, 50, 1,  # for fitfun2
        ]
        if not BATCH_MODE or DEBUG:
            print('init: %s' % init)
        out = optimize.leastsq(errfun, init, args=(x, loghist))
        ImageProc.latest_opt = out
        if not BATCH_MODE or DEBUG:
            print('result: %s' % list(out))
        # threshold value where background makes up roughly a fourth of all pixels
        bg = reversed(fitfun1(out[0][:3], x))
        ast = list(reversed(fitfun2(out[0][3:], x)))
        threshold_value = 255 - next((i for i, v in enumerate(bg) if v / ast[i] > 0.33), 255 - 100)
        if not BATCH_MODE or DEBUG:
            bg_ratio = out[0][:3][2] / out[0][3:][2]
            print('threshold_value: %s; bg_ratio: %s' % (threshold_value, bg_ratio))
        # plot figure with histogram and estimated distributions
        if DEBUG:
            from matplotlib import pyplot as plt
            plt.clf()
            plt.plot(x, fitfun1(out[0][:3], x), label='background')
            plt.plot(x, fitfun2(out[0][3:], x), label='foreground')
            plt.plot(x, fitfun(out[0], x), label='total fit')
            plt.plot(x, loghist, label='log(hist)')
            plt.legend()
            fig = plt.gcf()
            fig.canvas.draw()
            data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            w, h = fig.canvas.get_width_height()
            data = data.reshape((h * 3, w * 3, 3))  # for some reason get_width_height returns 1/3 of the actual dims
            cv2.imshow('histogram fitting', data)
        return threshold_value

    @staticmethod
    def overlay_mask(image, mask):
        """Blend a red rendering of `mask` over grayscale `image`, rescaled to a common height."""
        sc_img = min(image.shape[0], mask.shape[0])/image.shape[0]
        sc_mask = min(image.shape[0], mask.shape[0])/mask.shape[0]
        img_color = cv2.cvtColor(cv2.resize(image, None, fx=sc_img, fy=sc_img, interpolation=cv2.INTER_CUBIC), cv2.COLOR_GRAY2RGB)
        mask_color = cv2.cvtColor(cv2.resize((mask > 0).astype(np.uint8)*255, None, fx=sc_mask, fy=sc_mask, interpolation=cv2.INTER_CUBIC), cv2.COLOR_GRAY2RGB)
        mask_color[:, :, 0:2] = 0
        return cv2.addWeighted(img_color, 0.5, mask_color, 0.5, 0.0)

    @staticmethod
    def merge(images):
        """Average a list of images via incremental equal-weight blending."""
        summed_weights = 1
        summed_images = images[0]
        for i in range(1, len(images)):
            summed_images = cv2.addWeighted(summed_images, summed_weights / (summed_weights + 1),
                                            images[i], 1 / (summed_weights + 1), 0.0)
            summed_weights += 1
        return summed_images

    @staticmethod
    def norm_xcorr(sce_img, res_img):
        """ calculate normalized cross correlation of images """
        if sce_img.shape[:2] != res_img.shape[:2]:
            sce_img = cv2.resize(sce_img, None,
                                 fx=res_img.shape[1] / sce_img.shape[1],
                                 fy=res_img.shape[0] / sce_img.shape[0],
                                 interpolation=cv2.INTER_CUBIC)
        sce_img = np.atleast_3d(sce_img)
        res_img = np.atleast_3d(res_img)
        sce_mean, sce_std = cv2.meanStdDev(sce_img)
        res_mean, res_std = cv2.meanStdDev(res_img)
        stds = sce_std * res_std
        if stds == 0:
            return 0
        corr = (sce_img - sce_mean) * (res_img - res_mean)
        nxcorr = np.mean(corr) / stds
        if False:
            # for debugging
            tmp = np.log(corr - np.min(corr) + 0.001)
            mint = np.min(tmp)
            maxt = np.max(tmp)
            tmp = (tmp - mint) * (1 / (maxt - mint))
            print('sm %.3f, ss %.3f, rm %.3f, rs %.3f, min %.3f, max %.3f, res %.3f' % (
                sce_mean, sce_std, res_mean, res_std, mint, maxt, nxcorr))
            cv2.imshow('corr', tmp)
            cv2.waitKey()
        return nxcorr
| {
"repo_name": "oknuutti/visnav-py",
"path": "visnav/algo/image.py",
"copies": "1",
"size": "16219",
"license": "mit",
"hash": -6727986656705697000,
"line_mean": 36.6310904872,
"line_max": 159,
"alpha_frac": 0.5298107158,
"autogenerated": false,
"ratio": 3.226377561169684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42561882769696835,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import numpy as np
from opensfm import bow
from opensfm import feature_loader
from opensfm.dataset import DataSetBase
def unnormalized_vlad(features, centers):
    """Compute unnormalized VLAD histograms from a set of
    features in relation to centers.

    Each feature's residual to its nearest center is accumulated into that
    center's row; the rows are then flattened into a single vector.

    Returns the unnormalized VLAD vector.
    """
    residuals = np.zeros(centers.shape, dtype=np.float32)
    for feature in features:
        nearest = np.argmin(np.linalg.norm(feature - centers, axis=1))
        residuals[nearest, :] += feature - centers[nearest]
    return np.ndarray.flatten(residuals)
def signed_square_root_normalize(v):
    """Compute Signed Square Root (SSR) normalization on
    a vector.

    Returns the SSR normalized vector (sign-preserving square root
    followed by L2 normalization).
    """
    ssr = np.sign(v) * np.sqrt(np.abs(v))
    return ssr / np.linalg.norm(ssr)
def vlad_distances(image, other_images, histograms):
    """Compute VLAD-based distance (L2 on VLAD-histogram)
    between an image and other images.

    Returns the image, the distances in the order of the other images,
    and the other images that were actually compared.
    """
    if image not in histograms:
        return image, [], []

    reference = histograms[image]
    distances, compared = [], []
    for candidate in other_images:
        if candidate == image or candidate not in histograms:
            continue
        distances.append(np.linalg.norm(reference - histograms[candidate]))
        compared.append(candidate)
    return image, distances, compared
class VladCache(object):
    """Caches the VLAD vocabulary and per-image VLAD histograms."""

    @lru_cache(1)
    def load_words(self, data: DataSetBase):
        """Load the VLAD vocabulary words for the dataset (cached, one entry).

        NOTE(review): lru_cache on an instance method keys on `self` and keeps
        the instance (and the DataSetBase argument) alive for the cache's
        lifetime — acceptable here only because a single module-level
        `instance` is used.
        """
        words, _ = bow.load_vlad_words_and_frequencies(data.config)
        return words

    @lru_cache(1000)
    def vlad_histogram(self, data: DataSetBase, image):
        """Return the SSR-normalized VLAD histogram for `image`, or None if it has no features."""
        words = self.load_words(data)
        _, features, _, _, _ = feature_loader.instance.load_all_data(
            data, image, masked=True
        )
        if features is None:
            return None
        vlad = unnormalized_vlad(features, words)
        vlad = signed_square_root_normalize(vlad)
        return vlad

# Shared module-level cache instance.
instance = VladCache()
| {
"repo_name": "oscarlorentzon/OpenSfM",
"path": "opensfm/vlad.py",
"copies": "1",
"size": "2003",
"license": "bsd-2-clause",
"hash": 6615733892999443000,
"line_mean": 26.0675675676,
"line_max": 69,
"alpha_frac": 0.6350474289,
"autogenerated": false,
"ratio": 3.4956369982547995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.96306844271548,
"avg_score": 0,
"num_lines": 74
} |
from functools import lru_cache
import numpy as np
import lie_learn.spaces.S2 as S2
def change_coordinates(coords, p_from='C', p_to='S'):
    """
    Change Spherical to Cartesian coordinates and vice versa, for points x in S^3.

    We use the following coordinate system:
    https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates

    Except that we use the order (alpha, beta, gamma), where beta ranges from 0 to pi while alpha and gamma range from
    0 to 2 pi.

    x0 = r * cos(alpha)
    x1 = r * sin(alpha) * cos(gamma)
    x2 = r * sin(alpha) * sin(gamma) * cos(beta)
    x3 = r * sin(alpha) * sin(gamma) * sin(beta)

    :param coords: array whose last axis holds (alpha, beta, gamma) for
        spherical input ('S') or (x0, x1, x2, x3) for Cartesian input ('C')
    :param p_from: source parameterization, 'S' or 'C'
    :param p_to: target parameterization, 'S' or 'C'
    :return: converted coordinates (the input itself when p_from == p_to)
    :raises NotImplementedError: for the Cartesian -> spherical direction
    :raises ValueError: for any other conversion pair
    """
    if p_from == p_to:
        return coords
    elif p_from == 'S' and p_to == 'C':
        alpha = coords[..., 0]
        beta = coords[..., 1]
        gamma = coords[..., 2]

        r = 1.
        out = np.empty(alpha.shape + (4,))

        ca = np.cos(alpha)
        cb = np.cos(beta)
        cc = np.cos(gamma)
        sa = np.sin(alpha)
        sb = np.sin(beta)
        sc = np.sin(gamma)
        out[..., 0] = r * ca
        out[..., 1] = r * sa * cc
        out[..., 2] = r * sa * sc * cb
        out[..., 3] = r * sa * sc * sb
        return out
    elif p_from == 'C' and p_to == 'S':
        # Bug fix: the draft code that used to follow this raise was
        # unreachable, and it computed beta and gamma with the same
        # expression (np.arctan2(y, x)), which cannot both be correct.
        # It has been removed until a correct inverse is implemented.
        raise NotImplementedError
    else:
        raise ValueError('Unknown conversion:' + str(p_from) + ' to ' + str(p_to))
def linspace(b, grid_type='SOFT'):
    """
    Compute a linspace on the 3-sphere.

    Since S3 is isomorphic to SO(3), we use the grid grid_type from:
    FFTs on the Rotation Group
    Peter J. Kostelec and Daniel N. Rockmore
    http://www.cs.dartmouth.edu/~geelong/soft/03-11-060.pdf

    :param b: bandwidth; each axis gets 2b samples
    :return: (alpha, beta, gamma) sample arrays
    """
    # Per "Sampling sets and quadrature formulae on the rotation group",
    # a sampling grid for SO(3) is obtained by attaching a uniform S^1 grid
    # (gamma) to a sampling grid for S^2 (beta, alpha).
    gamma = 2 * np.pi * np.arange(2 * b) / (2. * b)
    beta, alpha = S2.linspace(b, grid_type)
    return alpha, beta, gamma
def meshgrid(b, grid_type='SOFT'):
    """Return 'ij'-indexed meshgrid arrays over the SO(3) sampling grid of bandwidth b."""
    axes = linspace(b, grid_type)
    return np.meshgrid(*axes, indexing='ij')
def integrate(f, normalize=True):
    """
    Integrate a function f : S^3 -> R over the 3-sphere S^3, using the invariant integration measure
    mu((alpha, beta, gamma)) = dalpha sin(beta) dbeta dgamma
    i.e. this returns
    int_S^3 f(x) dmu(x) = int_0^2pi int_0^pi int_0^2pi f(alpha, beta, gamma) dalpha sin(beta) dbeta dgamma

    :param f: a function of three scalar variables returning a scalar.
    :param normalize: if True, divide by the total measure 8 pi^2 so that
        f(a, b, c) = 1 integrates to 1 (normalized Haar measure).
    :return: the integral of f over the 3-sphere
    """
    from scipy.integrate import quad

    # innermost: integrate over beta with the sin(beta) measure factor
    def over_beta(alpha, gamma):
        return quad(lambda beta: f(alpha, beta, gamma) * np.sin(beta), a=0, b=np.pi)[0]

    # middle: integrate over gamma
    def over_gamma(alpha):
        return quad(lambda gamma: over_beta(alpha, gamma), a=0, b=2 * np.pi)[0]

    integral = quad(over_gamma, 0, 2 * np.pi)[0]
    return integral / (8 * np.pi ** 2) if normalize else integral
def integrate_quad(f, grid_type, normalize=True, w=None):
    """
    Integrate a function f : SO(3) -> R, sampled on a grid of type grid_type, using quadrature weights w.

    :param f: an ndarray containing function values on a grid
    :param grid_type: the type of grid used to sample f (only 'SOFT' supported)
    :param normalize: whether to use the normalized Haar measure or not
    :param w: the quadrature weights. If not given, they are computed.
    :return: the integral of f over SO(3).
    """
    if grid_type != 'SOFT':
        raise NotImplementedError('Unsupported grid_type:', grid_type)

    b = f.shape[0] // 2
    if w is None:
        w = quadrature_weights(b, grid_type)

    # weights apply along the beta axis (axis 1)
    integral = np.sum(f * w[None, :, None])
    return integral if normalize else integral * 8 * np.pi ** 2
@lru_cache(maxsize=32)
def quadrature_weights(b, grid_type='SOFT'):
    """
    Compute quadrature weights for the grid used by Kostelec & Rockmore [1, 2].

    This grid is:
    alpha = 2 pi i / 2b
    beta = pi (2 j + 1) / 4b
    gamma = 2 pi k / 2b
    where 0 <= i, j, k < 2b are indices.
    This grid can be obtained from the function: S3.linspace or S3.meshgrid

    The quadrature weights for this grid are
    w_B(j) = 2/b * sin(pi(2j + 1) / 4b) * sum_{k=0}^{b-1} 1 / (2 k + 1) sin((2j + 1)(2k + 1) pi / 4b)
    This is eq. 23 in [1] and eq. 2.15 in [2].

    [1] SOFT: SO(3) Fourier Transforms
        Peter J. Kostelec and Daniel N. Rockmore
    [2] FFTs on the Rotation Group
        Peter J. Kostelec, Daniel N. Rockmore

    :param b: bandwidth (grid has shape 2b * 2b * 2b)
    :return: w: an array of length 2b containing the quadrature weights
    """
    if grid_type != 'SOFT':
        raise NotImplementedError

    k = np.arange(0, b)

    def _inner_sum(j):
        # sum_{k=0}^{b-1} 1/(2k+1) * sin((2j+1)(2k+1) pi / 4b)
        return np.sum((1. / (2 * k + 1)) * np.sin((2 * j + 1) * (2 * k + 1) * np.pi / (4. * b)))

    w = np.array([(2. / b) * np.sin(np.pi * (2. * j + 1.) / (4. * b)) * _inner_sum(j)
                  for j in range(2 * b)])

    # Not in the SOFT documentation, but empirically this normalization by
    # 2 * (2b)^2 is required to get correct results.  (The SOFT source also
    # mentions alternative weights for odd-order transforms, which were found
    # unnecessary here.)
    return w / (2. * ((2 * b) ** 2))
"repo_name": "AMLab-Amsterdam/lie_learn",
"path": "lie_learn/spaces/S3.py",
"copies": "1",
"size": "6760",
"license": "mit",
"hash": 1355352404733448000,
"line_mean": 32.631840796,
"line_max": 119,
"alpha_frac": 0.5434235834,
"autogenerated": false,
"ratio": 3.287451361867704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9326101116579197,
"avg_score": 0.000954765737701362,
"num_lines": 201
} |
from functools import lru_cache
import numpy as np
from ...core.utils import as_id_array
from ...utils.decorators import read_only_array
from ..voronoi.voronoi import DelaunayGraph
class RadialGraphLayout:
    """Static helpers that lay out node coordinates for a radial grid:
    one center node plus ``ring * shape[1]`` nodes on each of ``shape[0]`` rings."""

    @staticmethod
    def number_of_nodes(shape):
        """Total node count: the center node plus ring * shape[1] nodes per ring."""
        n_rings, n_points = shape
        return np.sum(np.arange(1, n_rings + 1)) * n_points + 1

    @staticmethod
    def xy_of_node(shape, spacing=1.0, xy_of_center=(0.0, 0.0)):
        """Create the node layout for a radial grid.

        Examples
        --------
        >>> import numpy as np
        >>> from landlab.graph.radial.radial import RadialGraphLayout
        >>> x, y = RadialGraphLayout.xy_of_node((1, 6))
        >>> x
        array([ 0. , 1. , 0.5, -0.5, -1. , -0.5, 0.5])
        >>> np.round(y / np.sin(np.pi / 3.0))
        array([ 0., 0., 1., 1., 0., -1., -1.])
        """
        n_rings = shape[0]
        n_nodes = RadialGraphLayout.number_of_nodes(shape)

        x = np.empty((n_nodes,), dtype=float)
        y = np.empty((n_nodes,), dtype=float)
        x[0] = y[0] = 0.0

        start = 1
        for ring in range(1, n_rings + 1):
            count = ring * shape[1]
            theta = np.arange(count) * (np.pi * 2 / count)
            radius = spacing * ring
            x[start : start + count] = radius * np.cos(theta)
            y[start : start + count] = radius * np.sin(theta)
            start += count

        # round away floating-point fuzz before shifting to the grid center
        x = np.round(x, decimals=6) + xy_of_center[0]
        y = np.round(y, decimals=6) + xy_of_center[1]

        return (x, y)
class RadialGraphExtras:
    """Mixin of derived, cached geometric properties for radial graphs.

    NOTE(review): the `@property` + `@lru_cache` stacking caches per `self`
    and keeps instances alive for the cache's lifetime — confirm this is
    intended before reusing the pattern elsewhere.
    """

    @property
    def shape(self):
        # (number of rings, nodes in first ring)
        return self._shape

    @property
    def spacing(self):
        return self._spacing

    @property
    def origin(self):
        return self._origin

    @property
    def number_of_rings(self):
        return self.shape[0]

    @property
    def spacing_of_rings(self):
        return self.spacing

    @property
    @lru_cache()
    @read_only_array
    def radius_of_ring(self):
        # radius of each ring, starting at 0.0 for the center "ring"
        return np.arange(0, self.number_of_rings, dtype=float) * self.spacing_of_rings

    @property
    @lru_cache()
    @read_only_array
    def angle_spacing_of_ring(self):
        # per-ring angular spacing between adjacent nodes (array, one per ring)
        return 2.0 * np.pi / self.nodes_per_ring

    @property
    @lru_cache()
    @read_only_array
    def nodes_per_ring(self):
        # one center node, then roughly 2*pi*ring nodes per ring so that
        # node spacing along a ring is approximately the ring spacing
        nodes_per_ring = np.empty(self.number_of_rings, dtype=int)
        nodes_per_ring[0] = 1
        nodes_per_ring[1:] = np.round(2.0 * np.pi * np.arange(1, self.number_of_rings))
        return nodes_per_ring

    @property
    @lru_cache()
    @read_only_array
    def ring_at_node(self):
        # ring index of every node, in node order
        return np.repeat(np.arange(self.number_of_rings), self.nodes_per_ring)

    @property
    @lru_cache()
    @read_only_array
    def radius_at_node(self):
        return self.radius_of_ring[self.ring_at_node]

    @property
    @lru_cache()
    @read_only_array
    def angle_at_node(self):
        # angles per ring are offset by half a step so rings are staggered
        angle_at_node = np.empty(self.nodes_per_ring.sum(), dtype=float)
        angle_at_node[0] = 0.0
        offset = 1
        for n_nodes in self.nodes_per_ring[1:]:
            angles, step = np.linspace(
                0.0, 2 * np.pi, n_nodes, endpoint=False, retstep=True, dtype=float
            )
            angle_at_node[offset : offset + n_nodes] = np.add(
                angles, 0.5 * step, out=angles
            )
            offset += n_nodes
        return angle_at_node
class RadialGraph(RadialGraphExtras, DelaunayGraph):
    """Graph of a series of points on concentric circles.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.graph import RadialGraph
    >>> graph = RadialGraph((1, 4), sort=True)
    >>> graph.number_of_nodes
    5
    >>> graph.y_of_node
    array([-1., 0., 0., 0., 1.])
    >>> graph.x_of_node
    array([ 0., -1., 0., 1., 0.])
    """

    def __init__(self, shape, spacing=1.0, xy_of_center=(0.0, 0.0), sort=False):
        """Create a structured grid of triangles arranged radially.

        Parameters
        ----------
        shape : tuple of int
            Shape of the graph as number of rings and number of points
            in the first ring.
        spacing : float, optional
            Spacing between rings.
        xy_of_center : tuple of float, optional
            Coordinates of the node at the center of the grid.
        sort : bool, optional
            If True, sort the graph's elements after construction.
        """
        try:
            spacing = float(spacing)
        except TypeError:
            raise TypeError("spacing must be a float")

        xy_of_center = tuple(np.broadcast_to(xy_of_center, 2))

        x_of_node, y_of_node = RadialGraphLayout.xy_of_node(
            shape, spacing=spacing, xy_of_center=xy_of_center
        )

        self._ring_spacing = spacing
        self._shape = tuple(shape)
        self._xy_of_center = xy_of_center

        # Delaunay triangulation of the radial point set builds the links/patches.
        DelaunayGraph.__init__(self, (y_of_node, x_of_node))

        if sort:
            self.sort()

    @property
    def xy_of_center(self):
        return self._xy_of_center

    @property
    def number_of_rings(self):
        """Number of node rings in grid.

        Returns
        -------
        int
            The number of node rings in the radial grid (not counting the
            center node).

        Examples
        --------
        >>> import numpy as np
        >>> from landlab.graph import RadialGraph
        >>> graph = RadialGraph((1, 4))
        >>> graph.number_of_rings
        1

        LLCATS: GINF
        """
        return self._shape[0]

    @property
    def spacing_of_rings(self):
        """Fixed distance between rings.

        Returns
        -------
        ndarray of float
            The distance from the center node of each node.

        >>> from landlab.graph import RadialGraph
        >>> graph = RadialGraph((2, 6), spacing=2.)
        >>> graph.spacing_of_rings
        2.0

        LLCATS: GINF MEAS
        """
        return self._ring_spacing

    @property
    @lru_cache()
    def radius_at_node(self):
        """Distance for center node to each node.

        Returns
        -------
        ndarray of float
            The distance from the center node of each node.

        >>> from landlab.graph import RadialGraph
        >>> graph = RadialGraph((2, 6), sort=True)
        >>> np.round(graph.radius_at_node, 3)
        array([ 2., 2., 2., 2., 2., 1., 1., 2., 1., 0., 1., 2., 1.,
        1., 2., 2., 2., 2., 2.])

        LLCATS: NINF MEAS
        """
        # Euclidean distance from the grid center; overrides the ring-lookup
        # version inherited from RadialGraphExtras.
        return np.sqrt(
            np.square(self.x_of_node - self._xy_of_center[0])
            + np.square(self.y_of_node - self._xy_of_center[1])
        )

    @property
    @lru_cache()
    def number_of_nodes_in_ring(self):
        """Number of nodes in each ring.

        Returns
        -------
        ndarray of int
            Number of nodes in each ring, excluding the center node.

        >>> from landlab.graph import RadialGraph
        >>> graph = RadialGraph((4, 6))
        >>> graph.number_of_nodes_in_ring
        array([ 6, 12, 24, 48])

        LLCATS: NINF MEAS
        """
        # NOTE(review): doubles per ring (shape[1] * 2**ring), which differs
        # from RadialGraphExtras.nodes_per_ring (~2*pi*ring) — confirm which
        # convention callers expect.
        return as_id_array(self._shape[1] * 2 ** np.arange(self.number_of_rings))
| {
"repo_name": "cmshobe/landlab",
"path": "landlab/graph/radial/radial.py",
"copies": "3",
"size": "7137",
"license": "mit",
"hash": -384920638785078850,
"line_mean": 26.2404580153,
"line_max": 87,
"alpha_frac": 0.5376208491,
"autogenerated": false,
"ratio": 3.421380632790029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5459001481890029,
"avg_score": null,
"num_lines": null
} |
from functools import lru_cache
import requests
@lru_cache(maxsize=100000)
def get_label_from_rxcui(rxcui):
    """Return the RxNorm concept name for *rxcui*.

    Returns None when the properties endpoint yields an empty document.
    """
    url = "https://rxnav.nlm.nih.gov/REST/rxcui/{}/properties.json".format(rxcui)
    payload = requests.get(url).json()
    # Guard clause: an empty response means the rxcui is unknown.
    if not payload:
        return None
    return payload['properties']['name']
@lru_cache(maxsize=100000)
def get_rxcui_brandname(rxcui):
    """Return the rxcui of the first brand-name (BN) concept related to *rxcui*.

    Returns None when the API reports no brand-name concepts.
    """
    url = "https://rxnav.nlm.nih.gov/REST/rxcui/{}/related.json?tty=BN".format(rxcui)
    d = requests.get(url).json()
    # Fix: the original indexed ingredients['BN'] unconditionally, raising
    # KeyError when the API returned no BN conceptGroup at all (the dict
    # comprehension only filters to BN; it does not guarantee the key).
    groups = d.get('relatedGroup', {}).get('conceptGroup') or []
    ingredients = {x['tty']: x.get('conceptProperties', []) for x in groups
                   if x['tty'] in {'BN'}}
    brand_concepts = ingredients.get('BN', [])
    if brand_concepts:
        return brand_concepts[0]['rxcui']
    return None
def _ingredient_from_related(related):
    """Pick the most specific ingredient group (MIN > PIN > IN) from *related*."""
    # Fix: use .get() -- the original did related['MIN'] etc., which raised
    # KeyError whenever the API omitted one of the tty groups.
    for tty in ('MIN', 'PIN', 'IN'):
        if related.get(tty):
            return related[tty]
    return None


@lru_cache(maxsize=100000)
def _get_rxcui_ingredient_cached(rxcui):
    """Cached rxcui-only lookup path (keys are hashable ints/strings)."""
    return _ingredient_from_related(get_rxcui_related(rxcui))


def get_rxcui_ingredient(rxcui, related=None):
    """
    Get from ingredient/dose/form to compound.

    example: rxcui: 1442407 (Camphor 48 MG/ML / Eucalyptus oil 12 MG/ML / Menthol 26 MG/ML Topical Cream)
    to: 691178 (Camphor / Eucalyptus oil / Menthol)
    https://rxnav.nlm.nih.gov/REST/rxcui/1442407/allrelated.json
    http://bioportal.bioontology.org/ontologies/RXNORM?p=classes&conceptid=1442407
    Look for MIN, PIN, or IN
    types: https://www.nlm.nih.gov/research/umls/rxnorm/docs/2015/appendix5.html
    api doc: https://rxnav.nlm.nih.gov/RxNormAPIs.html#uLink=RxNorm_REST_getAllRelatedInfo

    If *related* is given (as output of get_rxcui_related), use that instead of
    making an api call.

    Fix: the original put @lru_cache on this function directly, so passing
    *related* (a dict, unhashable) raised TypeError and the documented
    parameter was unusable. Caching now covers only the rxcui-keyed path.

    :param rxcui: RxNorm concept id to resolve.
    :param related: optional pre-fetched output of get_rxcui_related.
    :return: list of ingredient concept dicts, or None if none found.
    """
    if not related:
        return _get_rxcui_ingredient_cached(rxcui)
    return _ingredient_from_related(related)
@lru_cache(maxsize=100000)
def get_rxcui_related(rxcui):
    """Fetch every concept related to *rxcui*, grouped by term type (tty).

    Returns a dict mapping tty code (e.g. 'IN', 'BN') to a list of concept
    property dicts (empty list when the group has no concepts).
    """
    url = "https://rxnav.nlm.nih.gov/REST/rxcui/{}/allrelated.json".format(rxcui)
    d = requests.get(url).json()
    related = {}
    for group in d['allRelatedGroup']['conceptGroup']:
        related[group['tty']] = group.get('conceptProperties', [])
    return related
# NOTE(review): these demo calls run at import time and hit the RxNav API
# over the network; consider moving them under ``if __name__ == "__main__":``
# so importing this module stays side-effect free.
get_rxcui_ingredient(403878)
get_rxcui_brandname(403878)
get_label_from_rxcui(614534)
get_rxcui_ingredient(497184)
| {
"repo_name": "SuLab/scheduled-bots",
"path": "scheduled_bots/drugs/pharma/rxnorm.py",
"copies": "1",
"size": "2134",
"license": "mit",
"hash": -8218233992204811000,
"line_mean": 31.3333333333,
"line_max": 105,
"alpha_frac": 0.6640112465,
"autogenerated": false,
"ratio": 2.8190224570673714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39830337035673713,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.